# Example #1
# 0
# Logistic regression on two iris features (columns 2 and 3: petal length/width).
iris = load_iris()
X, y = iris.data[:, [2, 3]], iris.target

# Stratified 70/30 split with a fixed seed for reproducibility.
seed = 1
test_portion = 0.3
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=test_portion, random_state=seed, stratify=y)

# Standardize with statistics estimated on the training set only,
# then apply the same transform to both splits.
sc = StandardScaler().fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

# Weakly regularized model: large C means little regularization.
c = 100
seed = 1
lr = LogisticRegression(C=c, random_state=seed).fit(X_train_std, y_train)

# Stack train and test so one plot shows both; test_idx highlights the
# 45 test rows (indices 105-149 of the stacked array).
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))

plot_decision_regions(X_combined_std,
                      y_combined,
                      classifier=lr,
                      test_idx=range(105, 150))
plt.legend(loc='upper left')
plt.show()
        # NOTE(review): fragment — the enclosing loop/branch header lies outside
        # this view. Presumably m is a central value and s a spread of
        # `humidity` (mean/std?) — TODO confirm against the missing header.
        if h > m + s:
            y.append(1.)           # well above threshold band -> positive class
            X.append(temp[i, :])
        elif h < m - s:
            y.append(-1.)          # well below threshold band -> negative class
            X.append(temp[i, :])
# Fallback branch: no margin band — split labels directly at m, so every
# sample gets a label (positive strictly above m, negative at or below).
else:
    for i, h in enumerate(humidity):
        if h > m:
            y.append(1.)
            X.append(temp[i, :])
        elif h <= m:
            y.append(-1.)
            X.append(temp[i, :])

y = np.array(y)
X = np.array(X)

# Fit perceptron
p = Perceptron()
p.fit(X, y)

# Display results
f4 = plt.figure(5)
utilities.plot_decision_regions(X,
                                y,
                                p,
                                title='Humid or Dry',
                                xlabel='normalized temperature',
                                ylabel='normalized light intensity')
# Example #3
# 0
        # NOTE(review): fragment — the enclosing loop/branch header lies outside
        # this view. Presumably m is a central value and s a spread of
        # `humidity` (mean/std?) — TODO confirm against the missing header.
        if h > m + s:
            y.append(1.)           # well above threshold band -> positive class
            X.append(temp[i, :])
        elif h < m - s:
            y.append(-1.)          # well below threshold band -> negative class
            X.append(temp[i, :])
# Fallback branch: no margin band — split labels directly at m, so every
# sample gets a label (positive strictly above m, negative at or below).
else:
    for i, h in enumerate(humidity):
        if h > m:
            y.append(1.)
            X.append(temp[i, :])
        elif h <= m:
            y.append(-1.)
            X.append(temp[i, :])

y = np.array(y)
X = np.array(X)

# Fit perceptron
p = Perceptron()
p.fit(X, y)

# Display results
f4 = plt.figure(5)
utilities.plot_decision_regions(X,
                                y,
                                p,
                                title='Humid or Dry',
                                xlabel='normalized temperature',
                                ylabel='normalized pressure')
# Persist the remaining evaluation metrics, then close the report file.
# writelines emits exactly the same bytes as the original sequential writes.
metric_lines = [
    'Test Set Accuracy : {} \n'.format(ACCURACY_SCORE_TEST),
    'Training Set Normalized Mutual Information  : {} \n'.format(NMI_TRAIN),
    'Test Set Normalized Mutual Information  : {} \n'.format(NMI_TEST),
    'Jaccard Similarity Score for train set : {} \n'.format(
        jaccard_score(y_train, y_train_pred)),
    'Jaccard Similarity Score for test set : {} \n'.format(
        jaccard_score(y_test, y_test_pred)),
]
file.writelines(metric_lines)
file.close()

# plot the lower dimensional embedding
#multi_timepoints_plot(X_train_tsne, y_train, time_points) # for the full dataset
# single_timepoint_plot(X_train, y_train, [7, 8]) # plot the redcued dimensional embedding

# SVM decision boundaries over the training set.
plot_decision_regions(X_train, y_train, classifier=svm)
plt.savefig(SAVE_PATH + '/svm_model_viz.jpg')

# SVM decision boundaries over the test set.
plot_decision_regions(X_test, y_test, classifier=svm)
plt.savefig(SAVE_PATH + '/svm_model_viz_test.jpg')

# Precision-recall across threshold probabilities, training set.
plot_precision_recall_curve(y_train, scores_train)
plt.savefig(SAVE_PATH + '/PR_train.jpg')

# Precision-recall across threshold probabilities, test set.
plot_precision_recall_curve(y_test, scores_test)
plt.savefig(SAVE_PATH + '/PR_test.jpg')
# Scatter the 2-D Isomap embedding, one color/marker per class label.
for l, c, m in zip(np.unique(y_train), colors, markers):
    cls = y_train == l
    plt.scatter(X_train_isomap[cls, 0], X_train_isomap[cls, 1],
                c=c, label=l, marker=m)

plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower right')
plt.show()

# Fit a logistic-regression classifier in the embedded space and show its
# decision regions over the training embedding.
lr = LogisticRegression().fit(X_train_isomap, y_train)
plot_decision_regions(X_train_isomap, y_train, classifier=lr)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend(loc='lower left')
plt.show()

# test data visualization

# Project the (standardized) test set with the fitted Isomap and reuse the
# classifier trained on the training embedding.
X_test_isomap = isomap.transform(X_test_std)
plot_decision_regions(X_test_isomap, y_test, classifier=lr)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend(loc='lower left')
plt.show()

# reducing into 3 dimensions