Example #1
def oneVsall(X, y, num_labels, lambda_):
    # lambda_ is presumably a regularization strength; it is accepted
    # but unused in this implementation.
    from batch_gradient_update import batch_gradient_update
    import numpy as np
    m, n = X.shape
    all_theta = np.zeros([num_labels, n + 1])
    # Prepend a column of ones for the intercept term.
    X0 = np.ones([m, 1])
    X = np.concatenate((X0, X), axis=1)
    # Train one binary classifier per class (one-vs-all).
    for c in range(num_labels):
        print('c:', c)
        initial_theta = np.zeros([n + 1, 1])
        # y % 10 folds a label of 10 back to 0 (digit datasets often
        # store the digit 0 as the label 10) before building the
        # binary 0/1 target vector for class c.
        theta = batch_gradient_update(initial_theta, X,
                                      np.array((y % 10) == c, dtype=int))
        all_theta[c, :] = np.reshape(theta, [n + 1])
    return all_theta
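
Both examples import batch_gradient_update from a local module that is never shown. A minimal sketch of what such a routine might look like, assuming plain batch gradient descent on the logistic-regression cost; the sigmoid helper and the alpha and num_iters defaults are illustrative assumptions, not taken from the source:

import numpy as np

def sigmoid_function(z):
    # Logistic sigmoid, mapping any real value into (0, 1).
    return 1.0 / (1.0 + np.exp(-z))

def batch_gradient_update(initial_theta, X, y, alpha=0.1, num_iters=1000):
    # Hypothetical batch gradient descent for logistic regression.
    # alpha and num_iters are assumed defaults, not from the source.
    m = X.shape[0]
    theta = np.reshape(initial_theta, (-1, 1)).astype(float)
    y = np.reshape(y, (m, 1))
    for _ in range(num_iters):
        h = sigmoid_function(X @ theta)   # predictions, shape (m, 1)
        grad = X.T @ (h - y) / m          # gradient of the mean log-loss
        theta = theta - alpha * grad      # one descent step
    return theta

A fixed learning rate and iteration count keep the sketch simple; the real module may well use a convergence test or an off-the-shelf optimizer instead.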
Example #2
import numpy as np
from sklearn.datasets import make_blobs

# X, y = read_data("ex2data1.txt")
X, y = make_blobs(n_samples=400, centers=2, random_state=0, cluster_std=1)
# After featureNormalize, accuracy can reach about 89%.
X, X_mu, X_sigma = featureNormalize(X)

# plot_data(X, y)
y = np.reshape(y, (y.size, 1))
m, n = X.shape
# Prepend a column of ones for the intercept term.
X = np.concatenate((np.ones([m, 1]), X), axis=1)
initial_theta = np.zeros([n + 1, 1])

# Sanity check: does cost_function evaluate cleanly at the initial theta?
cost, grad = cost_function(initial_theta, X, y)

# batch_gradient_update error!!! wrong theta
theta = batch_gradient_update(initial_theta, X, y)
print(theta)

prob = sigmoid_function(np.dot(X, theta))
print(prob)
# Threshold the probabilities at 0.5 to get hard 0/1 predictions.
prob = (prob >= 0.5).astype(float)
print(prob)
y = np.reshape(y, prob.shape)
print('accuracy:', float(1 - np.sum(np.abs(prob - y)) / m))

