Code example #1
def test_normalCase(self):
    #meanNormalization is assumed to return the normalized data along with
    #the per-column mean and standard deviation, as in the script below;
    #self.correctOutput must come from the same seeded input for this to hold
    output, mu, sigma = dn.meanNormalization(np.random.rand(10, 2), 1)
    self.assertAlmostEqual(float(np.sum(output)), float(np.sum(self.correctOutput)))
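
For context, a minimal sketch of what dn.meanNormalization might look like, assuming it returns the normalized matrix together with the per-column means and standard deviations (the signature used in the gradient-descent script below). The meaning of the second argument is not shown in these snippets; treating it as the ddof passed to np.std is purely an assumption.

import numpy as np

def meanNormalization(X, ddof):
    #hypothetical sketch: normalize each column to zero mean and unit spread;
    #interpreting the second argument as ddof is an assumption
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0, ddof=ddof)
    return (X - mu) / sigma, mu, sigma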
Code example #2
import numpy as np
from importlib import reload

#module names for dn and cm are assumed from the aliases used below
import dataNormalization as dn
import computeCostMulti as cm

#will need to reload these modules if the
#dataNormalization or computeCostMulti functions change
reload(dn)
reload(cm)

print('Loading data...')

#read in the data: the last column is the target, the rest are features
fileIn = np.loadtxt('ex1data2.txt', delimiter=',')
m, n = fileIn.shape
X = fileIn[:, :n-1]
y = fileIn[:, n-1:]

print('Normalizing features...')
X, mu, sigma = dn.meanNormalization(X, 1)

#add a column of ones for the intercept term
X = np.append(np.ones((m, 1)), X, axis=1)

print('Running gradient descent...')

#set learning rate and number of iterations
alpha = .01
num_iters = 400

#initialize theta with zeros; X now has n columns (n-1 features plus intercept)
theta = np.zeros((n, 1))

#despite its name, computeCostMulti also runs gradient descent, returning
#the final cost, the fitted theta, and the per-iteration cost history
J, theta, J_history = cm.computeCostMulti(X, y, theta, alpha, num_iters)
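
The script relies on cm.computeCostMulti without showing it. Judging only from the call site, it appears to run batch gradient descent and return the final cost, the fitted theta, and the cost history; a minimal sketch under those assumptions:

import numpy as np

def computeCostMulti(X, y, theta, alpha, num_iters):
    #hypothetical sketch: vectorized squared-error cost with a
    #batch gradient-descent loop that records the cost per iteration
    m = y.shape[0]
    J_history = np.zeros(num_iters)
    for i in range(num_iters):
        error = X.dot(theta) - y                      #(m, 1) residuals
        theta = theta - (alpha / m) * X.T.dot(error)  #gradient step
        J_history[i] = ((X.dot(theta) - y) ** 2).sum() / (2 * m)
    return J_history[-1], theta, J_history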
Code example #3
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  #registers the 3d projection
import dataNormalization as dn

from sklearn.cluster import KMeans
#from sklearn import datasets

np.random.seed(5)

#read data into a np.ndarray
rawData = np.loadtxt('practiceClusteringData.csv', delimiter=',')

#split X and y from the raw data: the first three columns are
#features, the fourth column holds the labels
X = rawData[:, :3]
print(X)
X, mu, sigma = dn.meanNormalization(X, 1)
print(X)
y = rawData[:, 3]

#centers = [[1, 1], [-1,-1], [1, -1]]
#iris = datasets.load_iris()
#X = iris.data
#y = iris.target

fignum = 1
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()

#rect is [left, bottom, width, height]; the original [0, 1, .95, 1] placed
#the axes above the figure, so the bottom coordinate is corrected to 0 here
ax = fig.add_axes([0, 0, .95, 1], projection='3d')
ax.view_init(elev=20, azim=134)

plt.cla()
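
The snippet stops right after clearing the axes. A plausible continuation using the standard scikit-learn KMeans API, with the cluster count and axis labels chosen purely for illustration:

#fit k-means and color the 3-D scatter by the learned cluster labels
est = KMeans(n_clusters=3)  #cluster count assumed for illustration
est.fit(X)

ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=est.labels_.astype(float), edgecolor='k')
ax.set_xlabel('feature 1')
ax.set_ylabel('feature 2')
ax.set_zlabel('feature 3')
plt.show()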