Example #1
y_data = np.zeros((m, 1))
x_data[:, 0] = 1
for i in range(m):
    line_tempt = line_list[i].split(",")
    for j in range(len(line_tempt)):
        if j != len(line_tempt) - 1:
            x_data[i, j + 1] = float(line_tempt[j])
        else:
            y_data[i, 0] = float(line_tempt[j])
# print(x_data)
# print(y_data)
file.close()
# description: x_data is an m*3 matrix, y_data is an m*1 vector
# end of data loading
# ignore the feature x0 when normalizing
x_data, mu, std = featureNormalize.feature_normalize(x_data)
# trick: normalize the data before adding x0
# =======================Part2. Gradient Descent ===========================
# initialize some params
alpha = 0.01
num_iters = 5000
theta = np.zeros((3, 1))
# this gradientDescent implementation supports multiple variables
theta, cost_history = gradientDescent.gradient_descent(x_data, y_data, theta,
                                                       alpha, num_iters)
# print(theta)
# =======================Part3. Predict ================
input_data = np.array([[1, 1650, 3]], dtype=np.float64)
input_data[0, 1:] -= mu[1:]
input_data[0, 1:] /= std[1:]
print("predicted price is {}".format(np.dot(input_data, theta)))
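
# --- Hedged sketch (not part of the original example): minimal stand-ins for the
# featureNormalize.feature_normalize and gradientDescent.gradient_descent helpers
# imported above, assuming the usual interfaces (column-wise z-score scaling that
# returns mu/std, and batch gradient descent that returns theta plus the cost
# history). The actual helper modules may differ, e.g. by skipping the x0 column.
import numpy as np

def feature_normalize_sketch(x):
    # z-score each column and return the statistics so new inputs can be scaled the same way
    mu = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    std[std == 0] = 1  # avoid dividing by zero on constant columns (e.g. an all-ones x0)
    return (x - mu) / std, mu, std

def gradient_descent_sketch(x, y, theta, alpha, num_iters):
    # vectorized batch gradient descent for linear regression
    m = y.shape[0]
    cost_history = np.zeros(num_iters)
    for it in range(num_iters):
        error = x.dot(theta) - y                      # (m, 1) residuals
        theta = theta - (alpha / m) * x.T.dot(error)  # simultaneous parameter update
        cost_history[it] = (error ** 2).sum() / (2 * m)
    return theta, cost_history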
Example #2
plt.axis([0, 13, 0, 150])
#plt.xticks(list(range(0,13,2)))
#plt.yticks(list(range(0,200,50)))

input('Program paused. Press ENTER to continue')

# ===================== Part 6 : Feature Mapping for Polynomial Regression =====================
# One solution to this is to use polynomial regression. You should now
# complete polyFeatures to map each example into its powers
#

p = 8

# Map X onto Polynomial Features and Normalize
X_poly = pf.poly_features(X, p)
X_poly, mu, sigma = fn.feature_normalize(X_poly)
X_poly = np.c_[np.ones(m), X_poly]

# Map X_poly_test and normalize (using mu and sigma)
X_poly_test = pf.poly_features(Xtest, p)
X_poly_test -= mu
X_poly_test /= sigma
X_poly_test = np.c_[np.ones(X_poly_test.shape[0]), X_poly_test]

# Map X_poly_val and normalize (using mu and sigma)
X_poly_val = pf.poly_features(Xval, p)
X_poly_val -= mu
X_poly_val /= sigma
X_poly_val = np.c_[np.ones(X_poly_val.shape[0]), X_poly_val]

print('Normalized Training Example 1 : \n{}'.format(X_poly[0]))
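
# --- Hedged sketch (not part of the original example): a minimal version of the
# pf.poly_features helper used above, assuming X holds a single feature and the
# mapping is [X, X^2, ..., X^p], which is what the normalization and bias-column
# steps above expect. The exercise's own polyFeatures implementation may differ.
import numpy as np

def poly_features_sketch(x, p):
    x = np.asarray(x).reshape(-1)  # treat X as one feature column
    return np.column_stack([x ** (k + 1) for k in range(p)])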
Example #3
# plt.figure()
# plt.scatter(X[:, 0], X[:, 1], facecolors='none', edgecolors='b', s=20)
# plt.axis('equal')
# plt.axis([0.5, 6.5, 2, 8])
# plt.show()

input('Program paused. Press ENTER to continue')

# ===================== Part 2: Principal Component Analysis =====================
# You should now implement PCA, a dimension reduction technique. You
# should complete the code in pca.py
#
print('Running PCA on example dataset.')

# Before running PCA, it is important to first normalize X
X_norm, mu, sigma = fn.feature_normalize(X)

# Run PCA
U, S = pca.pca(X_norm)

# rk.draw_line(mu, mu + 1.5 * S[0] * U[:, 0])
# rk.draw_line(mu, mu + 1.5 * S[1] * U[:, 1])
# plt.show()
print('Top eigenvector: \nU[:, 0] = {}'.format(U[:, 0]))
print('You should expect to see [-0.707107 -0.707107]')

input('Program paused. Press ENTER to continue')

# ===================== Part 3: Dimension Reduction =====================
# You should now implement the projection step to map the data onto the
# first k eigenvectors. The code will then plot the data in this reduced
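
# --- Hedged sketch (not part of the original example): a minimal pca.pca based on
# the SVD of the covariance matrix, assuming X_norm has already been mean-normalized
# by feature_normalize as above. The exercise's own pca module may differ.
import numpy as np

def pca_sketch(x_norm):
    m = x_norm.shape[0]
    sigma = x_norm.T.dot(x_norm) / m  # n x n covariance matrix
    u, s, _ = np.linalg.svd(sigma)    # columns of u are the principal directions
    return u, s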
Example #4
    print('Loading data ...\n')
    # Load Data
    data = load_data('ex1data2.txt')
    data = np.split(data, [2], axis=1)
    X = data[0]
    y = data[1]
    m = len(y)
    # Print out some data points
    print('First 10 examples from the dataset: \n')
    for i in range(10):
        print(' x = [%.0f %.0f], y = %.0f \n' % (X[i, 0], X[i, 1], y[i, 0]))
    # pause_func()

    # Scale features and set them to zero mean
    print('Normalizing Features ...\n')
    X, mu, sigma = feature_normalize(X)
    # Add intercept term to X
    X = np.append(np.ones((m, 1)), X, axis=1)

    # ================ Part 2: Gradient Descent ================
    print('Running gradient descent ...\n')
    # Number of iterations (loops)
    num_iters = 400
    # Try some other values of alpha
    alpha = 1
    theta = np.zeros((3, 1))
    theta, J_history_0 = gradient_descent_multi(X, y, theta, alpha, num_iters)
    print('theta is \n', theta, '\n')

    alpha = 0.3
    theta = np.zeros((3, 1))
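
    # --- Hedged sketch (not part of the original example): a small helper for
    # comparing the learning rates tried above by plotting their cost histories
    # on one axis; it assumes matplotlib is installed and that
    # gradient_descent_multi returns the per-iteration cost, as its use above suggests.
    def plot_convergence(histories):
        import matplotlib.pyplot as plt
        for alpha_value, j_history in histories:
            plt.plot(range(len(j_history)), j_history,
                     label='alpha = {}'.format(alpha_value))
        plt.xlabel('Number of iterations')
        plt.ylabel('Cost J')
        plt.legend()
        plt.show()
    # hypothetical usage: plot_convergence([(1, J_history_0), (0.3, J_history_1)])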
Example #5
data = np.loadtxt('ex1data2.txt', delimiter=',')
X = data[:, :2]
y = data[:, 2]
m = y.size

# Print out some data points
print('First 10 examples from the dataset:')
for i in range(10):
    print('x = {}, y = {}'.format(X[i], y[i]))

input('Program paused. Press enter to continue.\n')

# Scale features and set them to zero mean
print('Normalizing Features ...')

(X, mu, sigma) = featureNormalize.feature_normalize(X)

# Add intercept term to X
X = np.c_[np.ones(m), X]

# ================ Part 2: Gradient Descent ================

# ====================== YOUR CODE HERE ======================
# Instructions: We have provided you with the following starter
#               code that runs gradient descent with a particular
#               learning rate (alpha). 
#
#               Your task is to first make sure that your functions - 
#               computeCost and gradientDescent already work with 
#               this starter code and support multiple variables.
#
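
# --- Hedged sketch (not part of the original example): a minimal vectorized cost
# function of the kind the instructions above refer to, assuming X already contains
# the intercept column, y is a length-m 1-D array and theta is a length-n 1-D array.
# The course's own computeCost/gradientDescent implementations may differ.
import numpy as np

def compute_cost_sketch(X, y, theta):
    # J(theta) = (1 / (2m)) * sum((X.dot(theta) - y) ** 2); works for any number of features
    m = y.size
    error = X.dot(theta) - y
    return (error ** 2).sum() / (2 * m)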
Example #6
data = scio.loadmat('ex7data1.mat')
X = data['X']  # two features

# visualize the data
plt.figure()
plt.scatter(X[:, 0], X[:, 1], facecolors='none', edgecolors='b', s=20)
plt.axis('equal')
plt.axis([0.5, 6.5, 2, 8])

input('Program paused. Press ENTER to continue')
'''Part 2: implement PCA for data compression'''

print('Running PCA on example dataset.')

# Before running PCA, the features must be scaled (normalized)
X_norm, mu, sigma = fn.feature_normalize(X)

# Run PCA; it returns the U matrix and the S matrix
U, S = pca.pca(X_norm)

# Compare the two eigenvectors: U[:, 0] is the better projection direction (smallest projection error).
# The eigenvectors (columns) of U are orthogonal; for 2D -> 1D, take the first eigenvector as Ureduce.
rk.draw_line(mu, mu + 1.5 * S[0] * U[:, 0])
rk.draw_line(mu, mu + 1.5 * S[1] * U[:, 1])

print('Top eigenvector: \nU[:, 0] = {}'.format(
    U[:, 0]))  # the eigenvector matrix Ureduce obtained from PCA (the basis of the reduced subspace)
print('You should expect to see [-0.707107 -0.707107]')

input('Program paused. Press ENTER to continue')
'''Part 3: project the examples onto the reduced dimension, then reconstruct them'''
print('Dimension reduction on example dataset.')
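
# --- Hedged sketch (not part of the original example): minimal project/recover helpers
# for the dimension-reduction and reconstruction step announced above, assuming the
# usual interface (U from pca, K = number of components to keep). The exercise's own
# projectData/recoverData functions may differ.
def project_data_sketch(x_norm, u, k):
    # project each normalized example onto the top-k eigenvectors (columns of U)
    return x_norm.dot(u[:, :k])

def recover_data_sketch(z, u, k):
    # approximately reconstruct the data in the original feature space
    return z.dot(u[:, :k].T)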