Example #1
import numpy as np
import matplotlib.pyplot as plt


def decision_boundary(X, Y, w, b):
    # Plot the sign of the linear score w.x + b on a grid, together with the data.
    x_min, x_max = -2, 4
    y_min, y_max = -5, 3
    xx, yy = np.meshgrid(np.arange(x_min, x_max, .05),
                         np.arange(y_min, y_max, .05))

    if len(w) > 2:
        XX = polynomial_transform(np.vstack((xx.ravel(), yy.ravel())).T)
    else:
        XX = np.vstack((xx.ravel(), yy.ravel())).T
    Z = np.dot(XX, w) + b

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z > 0, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=Y, cmap='winter')
    plt.axis([-2, 4, -5, 3])
    plt.draw()
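
# This code calls polynomial_transform, which is not defined in the snippet.
# A minimal sketch, assuming a degree-2 polynomial feature map of the two input
# coordinates (an assumption, not the original helper), could be:
def polynomial_transform(X):
    x1, x2 = X[:, 0], X[:, 1]
    return np.vstack((x1, x2, x1 * x2, x1**2, x2**2)).T
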
# Load the dataset ('dataset' is assumed to hold the name of the .npz file,
# defined earlier in the script)
with np.load('./' + dataset + '.npz') as data:
    X = data['X']
    Y = data['Y']


algorithms = ['perceptron', 'logistic_regression', 'polynomial_regression']
algorithm = algorithms[1]  # selects 'logistic_regression'

Xorig = X
# eps is the step size (learning rate) used in the parameter updates
if algorithm == 'perceptron':
    eps = 1.
elif algorithm == 'logistic_regression':
    eps = 5.
elif algorithm == 'polynomial_regression':
    X = polynomial_transform(X)
    eps = 100.

nSamples = X.shape[0]
plt.ion()

w = np.zeros(X.shape[1])  # weight vector
b = 0.                    # bias

for i in range(100):
    if algorithm == 'perceptron':
        # Predicted outputs
        T = (np.dot(X, w) + b) > 0
        C = T
    else:
        # Predicted outputs
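        # A minimal sketch of the prediction step for the regression case,
        # assuming the standard sigmoid (an assumption, not shown in the
        # original snippet):
        T = 1. / (1. + np.exp(-(np.dot(X, w) + b)))  # assumed sigmoid scores
        C = T > 0.5                                  # assumed thresholded classes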
Example #3
import numpy as np
import matplotlib.pyplot as plt

# Load the dataset ('dataset' is assumed to be defined earlier in the script)
with np.load('./' + dataset + '.npz') as data:
    X = data['X']
    Y = data['Y']

algorithms = ['perceptron', 'logistic_regression', 'polynomial_regression']
algorithm = algorithms[1]

Xorig = X
# eps is the step size; for the regressions it is scaled by the largest
# squared example norm
if algorithm == 'perceptron':
    eps = 1.
elif algorithm == 'logistic_regression':
    eps = 1. / np.amax(np.sum(X**2, 1))
elif algorithm == 'polynomial_regression':
    X = polynomial_transform(X)
    eps = 20. / np.amax(np.sum(X**2, 1))

nSamples = X.shape[0]
plt.ion()

w = np.zeros(X.shape[1])
b = 0.

T = np.zeros(nSamples)                # predicted outputs, one per example
C = np.zeros(nSamples)                # predicted classes, one per example
sum_gradients = np.zeros(X.shape[1])  # running sum of gradients, one entry per feature
gradients = np.zeros(nSamples)        # stored gradient term for each example

for i in range(100 * nSamples):  # roughly 100 passes over the data, one example per step
    # Draw an example at random
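    # One common way to draw the example, assuming a uniform random index
    # (an assumption, not shown in the original snippet):
    n = np.random.randint(nSamples)  # index of the randomly chosen example
    x_n, y_n = X[n], Y[n]            # the drawn example and its label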