Code Example #1
def BatchGradientDescent(my_Y, Y, X_mat, thata):
    """
    Batch gradient descent (without regularization term).
    Relies on the module-level learning rate alpha, sample count DataMaxSize,
    convergence threshold e, and the project helpers CostFunction and SumMy_Y_Y.
    """
    my_copy = list(my_Y)

    error = CostFunction(Y, my_copy)
    X = array(X_mat)

    while True:
        new_thata = []
        for i in range(len(thata)):
            temp = SumMy_Y_Y(my_copy, Y, X, i)  # gradient component for thata[i]
            flag = thata[i] - alpha * temp / DataMaxSize
            new_thata.append(flag)

        thata = new_thata

        # Recompute predictions with the updated parameters.
        my_copy = array(
            dot(X_mat,
                array(new_thata).reshape(len(thata), 1)).reshape(1, len(Y)))[0]
        new_error = CostFunction(Y, my_copy)
        print(abs(new_error - error))
        if abs(new_error - error) <= e:  # stop when the cost change falls below e
            break

        error = new_error

    B = reFeature(thata)  # project-level helper, not shown in this excerpt
    return my_copy, B
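
The function above leans on project-level helpers (CostFunction, SumMy_Y_Y, reFeature) and globals (alpha, DataMaxSize, e) that the excerpt does not show. For reference, here is a minimal self-contained sketch of the same full-batch update rule for linear least squares; the data, learning rate, and tolerance below are illustrative assumptions, not values from the project:

import numpy as np

def batch_gradient_descent(X, y, alpha=0.1, tol=1e-8, max_iter=10000):
    """Minimize mean squared error (1/2m) * ||X @ theta - y||^2 with full-batch updates."""
    m, n = X.shape
    theta = np.zeros(n)
    prev_cost = np.inf
    for _ in range(max_iter):
        residual = X @ theta - y               # predictions minus targets
        cost = residual @ residual / (2 * m)
        if abs(prev_cost - cost) <= tol:       # same stopping rule as above
            break
        prev_cost = cost
        theta -= alpha * (X.T @ residual) / m  # gradient step over all samples
    return theta

# Illustrative data: recover y = 1 + 2x.
X = np.column_stack([np.ones(50), np.linspace(0, 1, 50)])
y = X @ np.array([1.0, 2.0])
print(batch_gradient_descent(X, y))            # approximately [1., 2.]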
Code Example #2
def TheLeastSquareMethod(X, Y):
    """
    Least squares via the normal equation, both plain and with
    ridge regularization (uses module-level ORDER and lamda).
    """
    regula = eye(ORDER + 1)
    X_matrix = []
    for i in range(ORDER + 1):
        X_matrix.append(array(X)**i)  # Vandermonde-style polynomial features
    X_matrix = mat(X_matrix).T
    Y_matrix = array(Y).reshape((len(Y), 1))

    X_matrix_T = X_matrix.T
    # B solves (X^T X) B = X^T Y; B1 adds the ridge term lamda * I.
    B = dot(dot(dot(X_matrix_T, X_matrix).I, X_matrix_T), Y_matrix)
    B1 = dot(dot((dot(X_matrix_T, X_matrix) + lamda * regula).I, X_matrix_T),
             Y_matrix)
    result = dot(X_matrix, B)
    result_reg = dot(X_matrix, B1)
    return X_matrix, Y_matrix, B, result, result_reg, B1
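
Explicitly inverting X^T X (the .I calls above) is numerically fragile for high polynomial orders. A common alternative is to solve the normal equations directly; a minimal sketch using numpy's vander and solve, with an illustrative order and ridge strength lam:

import numpy as np

def least_squares(x, y, order=9, lam=1e-4):
    """Polynomial fit via the normal equations, plain and ridge-regularized."""
    X = np.vander(x, order + 1, increasing=True)  # columns x^0 .. x^order
    XtX, Xty = X.T @ X, X.T @ y
    B = np.linalg.solve(XtX, Xty)                 # unregularized solution
    B_reg = np.linalg.solve(XtX + lam * np.eye(order + 1), Xty)  # ridge solution
    return B, B_reg

x = np.linspace(0, 1, 20)
y = np.sin(2 * np.pi * x) + np.random.normal(0, 0.1, x.size)
B, B_reg = least_squares(x, y)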
Code Example #3
File: data.py Project: w20ss11/python_ml
import numpy
from numpy import empty, ones, zeros, dot, transpose, row_stack

fr = open(r'D:\eclipse_workspace\Classify\data\data.txt')  # raw string keeps backslashes literal
macList = ['c0:38:96:25:5b:c3', 'e0:05:c5:ba:80:40', 'b0:d5:9d:46:a3:9b', '42:a5:89:51:c7:dd']
X = empty((4, 60), numpy.int8)  # one row per access point, one column per sample
for line in fr:
    parts = line.split(',')
    try:
        # index() raises ValueError for an unknown MAC, so no sentinel check
        # is needed (the original compared an int to '-1', which is always true).
        poi = macList.index(parts[2])
        print('poi', poi)
        lie = int(parts[-1].strip()) - 1  # column index: 1-based sample id
        X[poi, lie] = int(parts[1])       # signal strength, stored as int8
    except ValueError:
        pass
    else:
        print('no error')
fr.close()
print(X)
w = ones((4, 1))
b = 1
print(transpose(w))
z = dot(transpose(w), X) + b  # linear score w^T X + b (original added the literal 1)
y1 = zeros((30, 1))
y2 = ones((30, 1))
y = row_stack((y1, y2))       # labels: first 30 samples are 0, last 30 are 1
print(y)
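
The script stops at the linear score z = w^T X + b and the stacked label vector y, which is the usual setup for a logistic-regression-style classifier. As a hedged continuation (not part of the original project), the next step would typically be a sigmoid activation and a cross-entropy cost:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cross_entropy(z, y):
    # z: linear scores of shape (1, m); y: labels of shape (m, 1), as above.
    a = sigmoid(np.asarray(z)).reshape(-1)  # predicted probabilities
    y = np.asarray(y).reshape(-1)
    eps = 1e-12                             # guard against log(0)
    return -np.mean(y * np.log(a + eps) + (1 - y) * np.log(1 - a + eps))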
Code Example #4
if __name__ == "__main__":
    X, Y, Xc, Yc, ALLX, ALLY = CreatData()

    X_matrix, Y_matrix, B, Y_tlsm, Y_tlsm_reg, B_reg = TheLeastSquareMethod(
        X, Y)
    # regression curve obtained by least squares, in matrix form

    Xc_ = getXmat(Xc)

    my_Yc = array(dot(Xc_, B).reshape(1, len(Yc)))[0]
    my_Yc_reg = array(dot(Xc_, B_reg).reshape(1, len(Yc)))[0]

    loss_tlsm = CostFunction(Yc, my_Yc)  # root-mean-square error
    loss_tlsm_reg = CostFunction(Yc, my_Yc_reg)
    loss_tlsm_1 = CostFunction(Y, Y_tlsm)  # root-mean-square error
    loss_tlsm_reg_1 = CostFunction(Y, Y_tlsm_reg)
    print(loss_tlsm_1)
    print(loss_tlsm_reg_1)

    plt.figure("MachineLearningProjectOne")
    plt.figtext(0.3,
                0.85,
                'Cost Without Regulation:' + str(loss_tlsm),
                color='red',
                ha='center')
Code Example #5
from numpy import ones, dot  # imports assumed from the truncated part of the file

def fun(k, x, y):  # signature inferred from the commented-out minimize() call below
    print('haha')
    cost = (k * x - y)**2
    w_grad = 1     # placeholder gradient, not the true derivative
    return [cost, w_grad]

def fun1():
    return 1

k0 = 1
x = 1
y = 1
max_iterations = 10
#scipy.optimize.minimize(fun,k0,args=(x,y,),options = {'maxiter': max_iterations})
#scipy.optimize.leastsq(fun1,)

a = ones(2)                # shape (2,) row of ones
b = ones(2).reshape(2, 1)  # shape (2, 1) column vector
b[0, 0] = 2
b[1, 0] = 4
print(a, b)
print(a * b)      # broadcasting: (2,) * (2, 1) -> (2, 2)
print(dot(a, b))  # inner product, shape (1,)

print('-------------------------')

import sys
sys.path.append(r'D:\eclipse_workspace\py_base\src')  # raw string keeps backslashes literal
import kNN  # assumed: the kNN module from the appended path, not shown in this excerpt

group, labels = kNN.createDataSet()
print('group:',group)
print('labels:',labels)
res=kNN.classify0([0,0], group, labels, 3)
print(res)
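
createDataSet and classify0 come from the external kNN module on the appended path, which the excerpt does not show. Here is a minimal sketch of what such a classifier plausibly does, assuming Euclidean distance and majority vote; the function body and sample data are illustrative, not the module's actual code:

import numpy as np
from collections import Counter

def classify0(inX, dataSet, labels, k):
    """Label inX by majority vote among its k nearest training points."""
    dists = np.sqrt(((dataSet - np.asarray(inX))**2).sum(axis=1))  # Euclidean distances
    nearest = dists.argsort()[:k]                                  # indices of the k closest
    return Counter(labels[i] for i in nearest).most_common(1)[0][0]

group = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']
print(classify0([0, 0], group, labels, 3))  # -> 'B'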
Code Example #6
    '''
    X_lemad = array(X) / 2.5  # feature scaling
    X_mat_ = []
    for i in range(ORDER + 1):
        X_mat_.append(X_lemad**i)
    X_mat_ = mat(X_mat_).T
    X_matrix = getXmat(X)
    thata_ = []
    for i in range(10):
        thata_.append(uniform(-1, 1))  # random initial parameters in [-1, 1)
    thata0 = mat(array(thata_).reshape(ORDER + 1, 1))
    thata1 = thata0

    my_Y = array(dot(X_matrix, thata0).reshape(1, len(Y)))[0]

    thata = array(thata0.reshape(1, ORDER + 1))[0]
    Y_bgd, B = BatchGradientDescent(my_Y, Y, X_mat_, thata)

    thata = array(thata0.reshape(1, len(B)))[0]
    Y_bgd_reg, B_reg = BatchGradientDescentReg(my_Y, Y, X_mat_, thata)

    my_Yc = array(dot(Xc_, B).reshape(1, len(Yc)))[0]
    my_Yc_reg = array(dot(Xc_, B_reg).reshape(1, len(Yc)))[0]

    loss_tlsm = CostFunction(Yc, my_Yc)  # root-mean-square error
    loss_tlsm_reg = CostFunction(Yc, my_Yc_reg)
    loss_tlsm_1 = CostFunction(Y, Y_bgd)  # root-mean-square error
    loss_tlsm_reg_1 = CostFunction(Y, Y_bgd_reg)
    print("without Regulation: " + str(loss_tlsm_1))
Code Example #7
def bicgstab(X, Y, my_Y, B):
    '''
    Stabilized biconjugate gradient method (BiCGSTAB), used here to refine
    the coefficient vector B. Relies on the module-level convergence
    threshold e and the helper CostFunction.
    '''
    my_Y_copy = list(my_Y)

    error = CostFunction(Y, my_Y_copy)

    R0star = Y - dot(X, B)  # shadow residual, held fixed throughout
    R0 = Y - dot(X, B)      # current residual
    rho0 = 1
    alp0 = 1
    w0 = 1
    V0 = mat(zeros(len(Y)).reshape(len(Y), 1))
    P0 = mat(zeros(len(Y)).reshape(len(Y), 1))
    while True:
        rho1 = array(dot(R0star.T, R0))[0][0]
        beta = (rho1 / rho0) * (alp0 / w0)
        P1 = R0 + beta * (P0 - w0 * V0)

        V1 = dot(X, P1)
        alp0 = rho1 / array(dot(R0star.T, V1))[0][0]
        h = B + alp0 * P1  # half-step iterate
        my_Y_copy = array(dot(X, array(h).reshape(len(h), 1)).reshape(1, len(Y)))[0]
        new_error = CostFunction(Y, my_Y_copy)
        if abs(new_error - error) <= e:
            B = h
            break
        error = new_error
        S = R0 - alp0 * V1  # intermediate residual

        t = dot(X, S)
        w1 = array(dot(t.T, S))[0][0] / array(dot(t.T, t))[0][0]
        B = h + w1 * S      # full-step iterate
        my_Y_copy = array(dot(X, array(B).reshape(len(B), 1)).reshape(1, len(Y)))[0]
        new_error = CostFunction(Y, my_Y_copy)
        if abs(new_error - error) <= e:
            break
        R0 = S - w1 * t     # residual for the next sweep
        rho0 = rho1
        P0 = P1
        V0 = V1
        w0 = w1
        error = new_error
    return dot(X, B), B
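
BiCGSTAB is defined for square systems, so applying it to a tall design matrix as above is only meaningful through something like the normal equations. In practice one would defer to SciPy's implementation; a minimal sketch, where the data and the ridge-free normal equations (X^T X) B = X^T Y are illustrative assumptions:

import numpy as np
from scipy.sparse.linalg import bicgstab

# Illustrative tall design matrix and targets.
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 10))
Y = X @ np.arange(10.0) + rng.normal(0, 0.01, size=100)

# Solve the square normal equations (X^T X) B = X^T Y with BiCGSTAB.
B, info = bicgstab(X.T @ X, X.T @ Y, atol=1e-10)
print(info)  # 0 indicates convergence
print(B)     # close to [0, 1, ..., 9]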