Пример #1
0
def testRbf(k1=1.3):
    """Train an RBF-kernel SVM on testSetRBF.txt, then report the error
    rate on the training set and on the held-out testSetRBF2.txt set.

    k1: width parameter forwarded to the 'rbf' kernel.
    """
    trainX, trainY = SMO.loadDataSet('testSetRBF.txt')
    b, alphas = smoP(trainX, trainY, 200, 0.0001, 10000, ('rbf', k1))

    # Keep only the support vectors (samples with alpha > 0); they are
    # all that is needed to evaluate the decision function.
    trainMat = mat(trainX)
    svInd = nonzero(alphas.A > 0)[0]
    sVs = trainMat[svInd]
    svLabels = mat(trainY).transpose()[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])

    def _error_rate(xs, ys):
        # Fraction of samples whose predicted sign disagrees with the label.
        xMat = mat(xs)
        total = shape(xMat)[0]
        wrong = 0
        for idx in range(total):
            kernelEval = kernelTrans(sVs, xMat[idx, :], ('rbf', k1))
            predict = kernelEval.T * multiply(svLabels, alphas[svInd]) + b
            if sign(predict) != sign(ys[idx]):
                wrong += 1
        return float(wrong) / total

    print("the training error rate is: %f" % _error_rate(trainX, trainY))
    testX, testY = SMO.loadDataSet('testSetRBF2.txt')
    print("the test error rate is: %f" % _error_rate(testX, testY))
Пример #2
0
class TestSMOOnePoint(unittest.TestCase):
    """Train SMO on a single positive sample; with one point the optimal
    separator degenerates to zero weight and zero threshold."""

    def setUp(self):
        X = np.array([[1, 1]])
        Y = np.array([1])
        self.SMOobj = SMO(X, Y, 0.1, 0.001, 0.001)
        self.SMOobj.SMO_main()

    def test_weight_correctness(self):
        # BUG FIX: assertEqual on a numpy array compares element-wise and
        # raises "truth value of an array is ambiguous"; use the numpy
        # testing helper for an exact array comparison instead.
        np.testing.assert_array_equal(self.SMOobj.get_weight(),
                                      np.array([0.0, 0.0]))

    def test_threshold_correctness(self):
        self.assertEqual(self.SMOobj.get_threshold(), 0.0)
Пример #3
0
def selectJ(i, oS, Ei):
    """Pick the second working-set index j that maximizes |Ei - Ek|.

    Marks Ei as valid in the error cache first.  If no other cached
    errors are available, falls back to a random index j.
    """
    # Mark the error for i as valid in the cache.
    oS.eCache[i] = [1, Ei]
    valid = nonzero(oS.eCache[:, 0].A)[0]

    if len(valid) <= 1:
        # Nothing else cached yet: choose j at random.
        j = smo.selectJrand(i, oS.m)
        return j, calcEk(oS, j)

    # Heuristic: the larger |Ei - Ek|, the bigger the step we can take.
    bestK, bestDelta, bestEj = -1, 0, 0
    for k in valid:
        if k == i:
            continue
        Ek = calcEk(oS, k)
        delta = abs(Ei - Ek)
        if delta > bestDelta:
            bestK, bestDelta, bestEj = k, delta, Ek
    return bestK, bestEj
Пример #4
0
def selectJ(i, oS, Ei):
    """Choose alpha_j by the max-|Ei - Ek| heuristic, with a random
    fallback when the error cache holds no other valid entry."""
    oS.eCache[i] = [1, Ei]  # flag Ei as a valid cached error
    cached = nonzero(oS.eCache[:, 0].A)[0]

    if len(cached) > 1:
        maxK, maxDeltaE, Ej = -1, 0, 0
        # Scan every other cached error and keep the one farthest from Ei.
        for k in (idx for idx in cached if idx != i):
            Ek = calcEk(oS, k)
            if abs(Ei - Ek) > maxDeltaE:
                maxK, maxDeltaE, Ej = k, abs(Ei - Ek), Ek
        return maxK, Ej

    # First pass (only Ei cached): pick j uniformly at random.
    j = smo.selectJrand(i, oS.m)
    return j, calcEk(oS, j)
Пример #5
0
def selector(algo, func_details, popSize, Iter, succ_rate, mean_feval):
    """Dispatch a benchmark run to the chosen optimizer.

    algo: optimizer id (0 -> SMO).
    func_details: [name, lb, ub, dim, acc_err, obj_val] of the benchmark.
    Returns (solution, succ_rate, mean_feval).
    Raises ValueError for an unknown algo id -- the original code left
    'x' unbound and crashed with a NameError on the return statement.
    """
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]
    acc_err = func_details[4]
    obj_val = func_details[5]

    if algo == 0:
        x, succ_rate, mean_feval = smo.main(
            getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter,
            acc_err, obj_val, succ_rate, mean_feval)
        return x, succ_rate, mean_feval
    raise ValueError("unknown algorithm id: %r" % algo)
Пример #6
0
def innerL(i, oS):
    """Attempt one SMO optimization step on the alpha pair (i, j).

    oS is the shared optimization-state object (alphas, labelMat, X, b,
    C, tol, error cache).  Returns 1 if the alpha pair was changed,
    0 otherwise.
    """
    Ei = calcEk(oS, i)
    # Only optimize i if it violates the KKT conditions by more than tol
    # and its alpha can still move inside the box [0, C].
    if((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or \
            ( (oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        # Improvement over the simplified SMO: j is chosen by the
        # max-|Ei - Ej| heuristic rather than at random.
        j, Ej = selectJ(i, oS, Ei)
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()

        # L and H bound alpha_j so both alphas stay in [0, C] while the
        # linear equality constraint is preserved.
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])

        if L == H:
            # No room to move alpha_j.
            return 0

        # eta = 2*K(i,j) - K(i,i) - K(j,j), here with the linear kernel
        # written out as explicit inner products; must be negative for
        # the unconstrained optimum along the constraint line to exist.
        eta = 2.0 * oS.X[i, :] * oS.X[j, :].T - oS.X[i, :] * oS.X[
            i, :].T - oS.X[j, :] * oS.X[j, :].T
        if eta >= 0:
            # Degenerate/non-negative curvature: skip this pair.
            return 0

        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = smo.clipAlpha(oS.alphas[j], H, L)

        updateEk(oS, j)  # refresh the cached error for j

        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            # alpha_j barely moved; treat as no progress.
            return 0

        # Move alpha_i by the same amount in the opposite direction.
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold -
                                                           oS.alphas[j])
        updateEk(oS, i)

        # Update the threshold b; b1/b2 come from forcing Ei/Ej to zero.
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[
            i, :] * oS.X[i, :].T - oS.labelMat[j] * (
                oS.alphas[j] - alphaJold) * oS.X[i, :] * oS.X[j, :].T
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[
            i, :] * oS.X[j, :].T - oS.labelMat[j] * (
                oS.alphas[j] - alphaJold) * oS.X[j, :] * oS.X[j, :].T

        # Prefer the b derived from an alpha strictly inside (0, C).
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0
Пример #7
0
def selector(algo, func_details, popSize, Iter, succ_rate, mean_feval):
    """Select and run an optimizer on one benchmark function.

    algo: optimizer id (0 -> SMO).
    func_details: [name, lb, ub, dim, acc_err, obj_val].
    Returns (solution, succ_rate, mean_feval).
    Raises ValueError for an unknown algo id -- previously 'x' was left
    unbound and the return line raised a NameError instead.
    """
    # Unpack the benchmark description.
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]
    acc_err = func_details[4]
    obj_val = func_details[5]

    if algo == 0:
        # getattr pulls the cost function out of the benchmarks module.
        x, succ_rate, mean_feval = smo.main(
            getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter,
            acc_err, obj_val, succ_rate, mean_feval)
        return x, succ_rate, mean_feval
    raise ValueError("unknown algorithm id: %r" % algo)
Пример #8
0
def testRbf(k1=1.3):

    #load data
    dataArr, labelArr = smo.loadDataSet('testSetRBF.txt')
    #training
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf',k1))
    datMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()

    #build support vector
    svInd = nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print "there are %d Support Vectors" % shape(sVs)[0]

    #training error rate
    m,n = shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i,:],('rbf',k1))
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    print "the test error rate is: %f" %(float(errorCount)/m)

    #test error rate
    dataArr, labelArr = smo.loadDataSet('testSetRBF2.txt')
    errorCount = 0
    datMat = mat(dataArr)
    labelmat = mat(labelArr).transpose()

    m,n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i,:],('rbf',k1))
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    print "the test error rate is: %f" %(float(errorCount)/m)
Пример #9
0
def innerL(i, oS):
    """One SMO step on the pair (i, j) using a precomputed kernel matrix.

    Same algorithm as the linear-kernel variant, but inner products are
    read from oS.K instead of being recomputed from oS.X.
    Returns 1 if the alpha pair changed, 0 otherwise.
    """
    Ei = calcEk(oS,i)
    # Proceed only when alpha_i violates the KKT conditions beyond tol.
    if((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or \
            ( (oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        # Improvement over the simplified SMO: heuristic choice of j.
        j, Ej = selectJ(i, oS, Ei)
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()

        # Clip bounds keeping both alphas in [0, C] under the equality
        # constraint; the form depends on whether the labels agree.
        if(oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])

        if L==H:
            # No feasible movement for alpha_j.
            return 0

        # Kernelized curvature term (differs from plattSMO.py, which
        # expands the inner products explicitly).
        eta = 2.0*oS.K[i,j] - oS.K[i,i] - oS.K[j,j]

        if eta >= 0:
            # Non-negative curvature: skip this pair.
            return 0

        oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
        oS.alphas[j] = smo.clipAlpha(oS.alphas[j], H, L)

        updateEk(oS,j)  # refresh cached error for j

        if(abs(oS.alphas[j] - alphaJold) < 0.00001):
            # alpha_j barely moved; no progress.
            return 0

        # alpha_i moves by the same amount in the opposite direction.
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j])
        updateEk(oS,i)

        # Update the threshold b from whichever alpha is interior to (0, C).
        b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,i] - oS.labelMat[j]*(oS.alphas[j]-
            alphaJold)*oS.K[i,j]
        b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,j] - oS.labelMat[j]*(oS.alphas[j]-
            alphaJold)*oS.K[j,j]

        if(0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1+b2)/2.0
        return 1
    else: return 0
Пример #10
0
	def compute_alpha(self, X, y):
            """
            n_samples, n_features = X.shape
            K = self.kernel_matrix(X)
		# Solves
		# min 1/2 x^T P x + q^T x
		# s.t.
		#  Gx \coneleq h
		#  Ax = b
            P = cvxopt.matrix(np.outer(y, y) * K)
            q = cvxopt.matrix(-1 * np.ones(n_samples))

		# -a_i \leq 0
		# TODO(tulloch) - modify G, h so that we have a soft-margin classifier
            G_std = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
            h_std = cvxopt.matrix(np.zeros(n_samples))

		# a_i \leq c
            G_slack = cvxopt.matrix(np.diag(np.ones(n_samples)))
            h_slack = cvxopt.matrix(np.ones(n_samples) * self._c)

            G = cvxopt.matrix(np.vstack((G_std, G_slack)))
            h = cvxopt.matrix(np.vstack((h_std, h_slack)))

            A = cvxopt.matrix(y, (1, n_samples))
            b = cvxopt.matrix(0.0)

            solution = cvxopt.solvers.qp(P, q, G, h, A, b)

		# Lagrange multipliers
            return np.ravel(solution['x'])
            """  
            
            SMOobj = SMO(X, y, self._c, self._tol, self._eps)
            SMOobj.SMO_main()
            return SMOobj.get_alpha()		
Пример #11
0
def innerL(i, oS):
    """One SMO optimization step on the alpha pair (i, j), kernel version.

    Returns 1 if the pair of alphas was updated, 0 otherwise.
    """
    Ei = calcEk(oS, i)
    # Optimize only if alpha_i violates the KKT conditions by more than tol.
    if((oS.labelMat[i]*Ei<-oS.tol) and (oS.alphas[i]<oS.C)) or \
            ((oS.labelMat[i]*Ei>oS.tol) and (oS.alphas[i]>0)):
        j, Ej = selectJ(i, oS, Ei)  # choose alpha_j by max |Ei - Ej|
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        # Determine the feasible bounds [L, H] for alpha_j.
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print("L==H")
            return 0
        #eta = 2.0*oS.X[i,:]*oS.X[j,:].T-oS.X[i,:]*oS.X[i,:].T- oS.X[j,:]*oS.X[j,:].T
        eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]  # see "Statistical Learning Methods", p.128
        if eta >= 0:
            print("eta>=0")
            return 0
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = SMO.clipAlpha(oS.alphas[j], H,
                                     L)  # clip alpha_j into [L, H], then store it
        updateEk(oS, j)
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            print("j not moving enough!")
            return 0
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (
            alphaJold - oS.alphas[j])  # update alpha_i by the opposite amount
        updateEk(oS, i)
        # Recompute the threshold b from the updated alphas (kernel form).
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[
            i, i] - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i, j]
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[
            i, j] - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j, j]
        # b1=oS.b-Ei-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T-oS.labelMat[j]*\
        #    (oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
        # b2=oS.b-Ej-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T-oS.labelMat[j]*\
        #    (oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0
Пример #12
0
class TestSMOTwoPoints(unittest.TestCase):
    """Two symmetric samples (-1,-1)/(1,1): the separator is x + y = 0,
    so the two weight components are equal and the threshold is zero."""

    def setUp(self):
        X = np.array([[-1, -1], [1, 1]])
        Y = np.array([-1, 1])
        self.SMOobj = SMO(X, Y, 0.1, 0.001, 0.001)
        # BUG FIX: training was never run (unlike the one-point test), so
        # get_weight() returned the untrained zero vector and the ratio
        # below divided zero by zero.
        self.SMOobj.SMO_main()

    def test_weight_correctness(self):
        w = self.SMOobj.get_weight()
        self.assertAlmostEqual(w[0] / w[1], 1.0, delta=1e-5)

    def test_threshold_correctness(self):
        self.assertAlmostEqual(self.SMOobj.get_threshold(), 0.0, delta=1e-5)
Пример #13
0
def draw(data, classes, sv, alphas, b, resolution):
    """Plot the decision surface over a grid, the samples, and the
    support vectors sv."""
    cmap = mpl.cm.get_cmap('Paired')

    # Grid bounds padded slightly beyond the data range.
    one_lo, one_hi = data[:, 0].min() - 0.1, data[:, 0].max() + 0.1
    two_lo, two_hi = data[:, 1].min() - 0.1, data[:, 1].max() + 0.1
    xx1, xx2 = np.meshgrid(
        np.arange(one_lo, one_hi, (one_hi - one_lo) / resolution),
        np.arange(two_lo, two_hi, (two_hi - two_lo) / resolution))

    # Classify every grid point and reshape back onto the grid.
    grid_points = np.c_[xx1.ravel(), xx2.ravel()]
    preds = [SMO.predict(data, classes, alphas, b, p) for p in grid_points]
    result = np.array(preds).reshape(xx1.shape)

    plt.contourf(xx1, xx2, result, cmap=cmap)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=cmap)
    plt.scatter(sv[:, 0], sv[:, 1], s=10)

    plt.show()
Пример #14
0
def selectJ(i, oS, Ei):
    """Select the second alpha index j maximizing |Ei - Ek|, falling
    back to a random index when the error cache holds no other valid
    entry.  Returns (j, Ej)."""
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    oS.eCache[i] = [1, Ei]  # eCache buffers prediction errors; mark Ei as valid
    vaildEcacheList = nonzero(oS.eCache[:, 0].A)[0]
    # .A converts the matrix to an array; nonzero returns paired (row, col)
    # indices, so [0] keeps the row indices whose first column (the valid
    # flag) is non-zero.
    if (len(vaildEcacheList)) > 1:
        for k in vaildEcacheList:
            if k == i: continue
            Ek = calcEk(oS, k)  # the index k is all calcEk needs
            deltaE = abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej
    else:  # no other cached errors: pick alpha_j at random
        j = SMO.selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
    return j, Ej
Пример #15
0
def main():
    print '-----------------------load data and training-----------'
    dataArr, labelArr = smo.loadDataSet('testSet.txt')
    b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)
    print "b", b
    print "alphas > 0", alphas[alphas > 0]

    #calculate weights
    print '-----------------------calculate weights----------------'
    ws = calcWs(alphas, dataArr, labelArr)
    print "ws", ws

    print '-----------------------test classify result----------------'
    #process first node
    datMat = mat(dataArr)
    print "first node classify to: ", datMat[0]*mat(ws) + b
    print "first node true label:", labelArr[0]

    #process third node
    print "third node classify to: ", datMat[2]*mat(ws) + b
    print "third node true label: ", labelArr[2]
Пример #16
0
def main():
    print '-----------------------load data and training-----------'
    dataArr, labelArr = smo.loadDataSet('testSet.txt')
    b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)
    print "b", b
    print "alphas > 0", alphas[alphas > 0]

    #calculate weights
    print '-----------------------calculate weights----------------'
    ws = calcWs(alphas, dataArr, labelArr)
    print "ws", ws

    print '-----------------------test classify result----------------'
    #process first node
    datMat = mat(dataArr)
    print "first node classify to: ", datMat[0] * mat(ws) + b
    print "first node true label:", labelArr[0]

    #process third node
    print "third node classify to: ", datMat[2] * mat(ws) + b
    print "third node true label: ", labelArr[2]
Пример #17
0
    def train(self, C=[0.01, 1, 10, 100], tol=1e-3):
        """Select C by 10-fold cross-validation, then train the final SMO
        model on the full dataset and store its parameters on self.

        C:   candidate penalty values to try.
             NOTE(review): mutable default argument -- the list is shared
             across calls; confirm this is intended.
        tol: solver tolerance forwarded to SMO.
        Returns None; sets self.X, self.Y, self.kernel_dict, self.alphas,
        self.b from the final model.
        """

        # NOTE(review): m is never used below.
        m = self.Y.shape[0]

        # A holds the 10 feature folds, B the matching label folds.
        A = [0] * 10
        B = [0] * 10

        # Shuffle the data once, then slice it into 10 folds.
        indices = numpy.random.permutation(
            self.X.shape[0])  # shape[0] = axis-0 length, i.e. sample count
        rand_data_x = self.X[indices]
        rand_data_y = self.Y[indices]  # rand_data_y holds the labels

        l = int(len(indices) / 10)

        for i in range(9):
            A[i] = rand_data_x[i * l:i * l + l]
            B[i] = rand_data_y[i * l:i * l + l]

        # The last fold absorbs the remainder when m is not divisible by 10.
        A[9] = rand_data_x[9 * l:]
        B[9] = rand_data_y[9 * l:]
        #        '''
        #        X_num=self.X.shape[0]
        #        train_index=range(X_num)
        #        test_size=int(X_num*0.1)+1
        #        for i in range(9):
        #            test_index=[]
        #            for j in range(test_size):
        #                randomIndex=int(numpy.random.uniform(0,len(train_index)))
        #                test_index.append(train_index[randomIndex])
        #                #del train_index[randomIndex]
        #            A[i]=self.X[test_index,:]
        #            B[i]=self.Y[test_index,:]
        #        A[9]=self.X.ix_[train_index]
        #        B[9]=self.Y.ix_[train_index]
        #        '''

        acc_best = 0
        C_best = None
        avg_acc = 0
        #        gamma_best = None
        for CVal in C:
            #            for gammaVal in gamma:
            #                avg_acc = 0
            for i in range(10):
                # Fold i is the validation split; the other 9 are training.
                X_test = A[i]
                Y_test = B[i]

                # X_train = None
                # Y_train = None

                #model= SMO.SMO_Model(X_train, Y_train, CVal,  kernel,gammaVal, tol=1e-3, eps=1e-3)
                #output_model=SMO.SMO(model)

                # compute the decision function from output_model's
                # parameters -> derive the accuracy
                #acc = _evaulate(output_model)

                X_train = numpy.concatenate([
                    A[(i + 1) % 10], A[(i + 2) % 10], A[(i + 3) % 10],
                    A[(i + 4) % 10], A[(i + 5) % 10], A[(i + 6) % 10],
                    A[(i + 7) % 10], A[(i + 8) % 10], A[(i + 9) % 10]
                ],
                                            axis=0)
                Y_train = numpy.concatenate([
                    B[(i + 1) % 10], B[(i + 2) % 10], B[(i + 3) % 10],
                    B[(i + 4) % 10], B[(i + 5) % 10], B[(i + 6) % 10],
                    B[(i + 7) % 10], B[(i + 8) % 10], B[(i + 9) % 10]
                ],
                                            axis=0)

                #                SMO.GG = gammaVal
                # calculate Kernel Matrix then pass it to SMO.
                # IK ("indefinite kernel") path: compute the kernel, then
                # flip its negative eigen-directions via trans_mat.
                if self.IK:
                    if self.kernel_dict['type'] == 'TANH':
                        K = Kernel.TANH(X_train.shape[0],
                                        self.kernel_dict['c'],
                                        self.kernel_dict['d'])
                        K.calculate(X_train)
                    elif self.kernel_dict['type'] == 'TL1':
                        K = Kernel.TL1(X_train.shape[0],
                                       self.kernel_dict['rho'])
                        K.calculate(X_train)

                    p1, p2 = trans_mat(K.kernelMat)
                    K.kernelMat = np.dot((p1 - p2), K.kernelMat)

                # Standard kernels.  NOTE(review): when self.IK is true and
                # the type is TANH/TL1, the branches below recompute K and
                # overwrite the spectrally-corrected matrix -- confirm.
                if self.kernel_dict['type'] == 'RBF':
                    K = Kernel.RBF(X_train.shape[0], self.kernel_dict['gamma'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'LINEAR':
                    K = Kernel.LINEAR(X_train.shape[0])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'POLY':
                    K = Kernel.POLY(X_train.shape[0], self.kernel_dict['c'],
                                    self.kernel_dict['d'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'TANH':
                    K = Kernel.TANH(X_train.shape[0], self.kernel_dict['c'],
                                    self.kernel_dict['d'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'TL1':
                    K = Kernel.TL1(X_train.shape[0], self.kernel_dict['rho'])
                    K.calculate(X_train)

                model = SMO.SMO_Model(X_train,
                                      Y_train,
                                      CVal,
                                      K,
                                      tol=1e-3,
                                      eps=1e-3)

                output_model = SMO.SMO(model)

                #IK: map the alphas back through the spectral correction
                if self.IK:
                    output_model.alphas = np.dot((p1 - p2),
                                                 output_model.alphas)

                acc = SMO._evaluate(output_model, X_test, Y_test)

                avg_acc = avg_acc + acc / 10

                # NOTE(review): avg_acc is never reset between C values and
                # this comparison runs inside the fold loop (cf. the
                # commented-out 'avg_acc = 0' above) -- the C selection
                # looks buggy; confirm intent before relying on C_best.
                if avg_acc > acc_best:
                    acc_best = avg_acc
                    # record the best C (and gamma, if enabled)
                    C_best = CVal
#                    gamma_best =gammaVal
#                    self.gamma = gamma_best

# final training pass on the full dataset
#        SMO.GG = gamma_best

#!K  -- same IK spectral correction as above, now on all of self.X
        if self.IK:
            if self.kernel_dict['type'] == 'TANH':
                K = Kernel.TANH(self.X.shape[0], self.kernel_dict['c'],
                                self.kernel_dict['d'])
                K.calculate(self.X)
            elif self.kernel_dict['type'] == 'TL1':
                K = Kernel.TL1(self.X.shape[0], self.kernel_dict['rho'])
                K.calculate(self.X)

            p1, p2 = trans_mat(K.kernelMat)
            K.kernelMat = np.dot((p1 - p2), K.kernelMat)

        if self.kernel_dict['type'] == 'RBF':
            K = Kernel.RBF(self.X.shape[0], self.kernel_dict['gamma'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'LINEAR':
            K = Kernel.LINEAR(self.X.shape[0])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'POLY':
            K = Kernel.POLY(self.X.shape[0], self.kernel_dict['c'],
                            self.kernel_dict['d'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'TANH':
            K = Kernel.TANH(self.X.shape[0], self.kernel_dict['c'],
                            self.kernel_dict['d'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'TL1':
            K = Kernel.TL1(self.X.shape[0], self.kernel_dict['rho'])
            K.calculate(self.X)

        SVM_model = SMO.SMO(
            SMO.SMO_Model(self.X, self.Y, C_best, K, tol=1e-3, eps=1e-3))
        # copy the fitted parameters into this SVM object

        if self.IK:
            SVM_model.alphas = np.dot((p1 - p2), SVM_model.alphas)

        self.X = SVM_model.X
        self.Y = SVM_model.y
        self.kernel_dict = SVM_model.kernel
        self.alphas = SVM_model.alphas
        self.b = SVM_model.b

        # C_best = C
        # gamma_best =gamma

        # (w,b) = SMO(X_train,Y_train,C_best,gamma_best,kernal,tol=1e-3)
        # self.w = w
        # self.b = b
        return None
Пример #18
0
from SMO import *
from sklearn.model_selection import train_test_split
import numpy as np

# Two Gaussian clusters in 5-D: each uses a random rank-1 covariance
# (np.outer of a vector with itself); one cluster is centred at -2,
# the other at +2.
x = np.random.normal(size=(5, ))
y = np.outer(x, x)
z = np.random.multivariate_normal(np.zeros(5) - 2, y, size=100)

x1 = np.random.normal(size=(5, ))
y1 = np.outer(x1, x1)
z1 = np.random.multivariate_normal(np.zeros(5) + 2, y1, size=100)

# Stack the clusters; first cluster gets label +1, second -1.
X_train = np.r_[z, z1]

Y_train = np.r_[[1 for x in range(100)], [-1 for x in range(100)]]

# Hold out a third of the samples for scoring.
X_train, X_test, y_train, y_test = train_test_split(X_train,
                                                    Y_train,
                                                    test_size=0.33,
                                                    random_state=42)

# Fit the SMO-based SVM and report its accuracy on the held-out split.
model = SMO()
model.fit(X_train, y_train)
print('score:', model.score(X_test, y_test))
Пример #19
0
                     np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(SMO.predict(data, classes, alphas, b, inputs[i]))
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    plt.scatter(sv[:,0], sv[:,1], s=10)
    
    plt.show()

if __name__ == '__main__':
    data_set, N = simulate_data()
    data, classes = cook_data(data_set)
    #print data
    #print classes
    alphas, b = SMO.run(data, classes)
    print alphas
    print b
    sv = []
    for i in range(len(alphas)):
        if alphas[i] >= 0.001 and alphas[i] <= 100.0:
            print alphas[i], data[i]
            sv.append(data[i])
    sv = np.array(sv)
    print sv
    draw(data, classes, sv, alphas, b, 100.)
Пример #20
0
def selector(algo, func_details, popSize, Iter, trainDataset, testDataset):
    """Train an MLP whose weights are tuned by the chosen metaheuristic,
    then attach train/test classification metrics to the solution.

    algo: 0=PSO, 1=MVO, 2=GWO, 3=MFO, 4=CS, 5=BAT, 6=SMO.
    func_details: [cost-function name in costNN, lb, ub].
    trainDataset/testDataset: CSV file names under datasets/ whose last
    column is the class label.
    Returns the solution object x.
    Raises ValueError for an unknown algo id (previously 'x' stayed
    unbound and the code crashed with a NameError further down).
    """
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]

    dataTrain = "datasets/" + trainDataset
    dataTest = "datasets/" + testDataset

    Dataset_train = numpy.loadtxt(open(dataTrain, "rb"),
                                  delimiter=",",
                                  skiprows=0)
    Dataset_test = numpy.loadtxt(open(dataTest, "rb"),
                                 delimiter=",",
                                 skiprows=0)

    # Instance/feature counts; the last column holds the label.
    # (Removed unused locals DatasetSplitRatio and numInputsTest.)
    numRowsTrain = numpy.shape(Dataset_train)[0]
    numInputsTrain = numpy.shape(Dataset_train)[1] - 1
    numRowsTest = numpy.shape(Dataset_test)[0]

    trainInput = Dataset_train[0:numRowsTrain, 0:-1]
    trainOutput = Dataset_train[0:numRowsTrain, -1]

    testInput = Dataset_test[0:numRowsTest, 0:-1]
    testOutput = Dataset_test[0:numRowsTest, -1]

    # One hidden layer with 2*inputs+1 neurons.
    HiddenNeurons = numInputsTrain * 2 + 1
    net = nl.net.newff([[0, 1]] * numInputsTrain, [HiddenNeurons, 1])

    # Length of the flattened weight/bias vector being optimized.
    dim = (numInputsTrain * HiddenNeurons) + (2 * HiddenNeurons) + 1

    cost = getattr(costNN, function_name)
    if algo == 0:
        x = pso.PSO(cost, lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    elif algo == 1:
        x = mvo.MVO(cost, lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    elif algo == 2:
        x = gwo.GWO(cost, lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    elif algo == 3:
        x = mfo.MFO(cost, lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    elif algo == 4:
        x = cs.CS(cost, lb, ub, dim, popSize, Iter,
                  trainInput, trainOutput, net)
    elif algo == 5:
        x = bat.BAT(cost, lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    elif algo == 6:
        x = smo.main(cost, lb, ub, dim, popSize, Iter, 1.0e-5,
                     trainInput, trainOutput, net)
    else:
        raise ValueError("unknown algorithm id: %r" % algo)

    # Evaluate the MLP classifier on the training set.
    trainClassification_results = evalNet.evaluateNetClassifier(
        x, trainInput, trainOutput, net)
    x.trainAcc = trainClassification_results[0]
    x.trainTP = trainClassification_results[1]
    x.trainFN = trainClassification_results[2]
    x.trainFP = trainClassification_results[3]
    x.trainTN = trainClassification_results[4]

    # Evaluate the MLP classifier on the testing set.
    testClassification_results = evalNet.evaluateNetClassifier(
        x, testInput, testOutput, net)
    x.testAcc = testClassification_results[0]
    x.testTP = testClassification_results[1]
    x.testFN = testClassification_results[2]
    x.testFP = testClassification_results[3]
    x.testTN = testClassification_results[4]

    return x
Пример #21
0
Calculate w and draw the picture.
The variables whose α is not equal to zero
are called support vectors.
'''


def calculateW(alphas, data, labels):
    """Recover the primal weight vector w = sum_i alpha_i * y_i * x_i.

    alphas: dual coefficients, one per sample (indexable).
    data:   sample matrix, one row per sample.
    labels: class labels (typically -1/+1).
    Returns w as an (n, 1) column array.

    (Removed an unreachable 'pass' statement that followed the return.)
    """
    x = np.mat(data)
    label = np.mat(labels).transpose()
    m, n = np.shape(x)
    w = np.zeros((n, 1))
    for i in range(m):
        w += np.multiply(alphas[i] * label[i], x[i, :].T)
    return w


if __name__ == '__main__':
    # Linear case: train on testSet.txt, recover w, and draw the
    # separating line y = (-b - w0*x) / w1 over x in [0, 10].
    data, label = Tool.loadDataSet('../Data/testSet.txt')
    b, alphas = SMO.smo(data, label, kernel=False)
    weights = calculateW(alphas, data, label)
    xs = np.arange(0, 11)
    print(weights)
    ys = (-b - weights[0] * xs) / weights[1]
    Tool.drawDataset(data, label, xs, ys.tolist()[0], line=True, alphas=alphas)

    # RBF-kernel case on testSetRBF.txt: no line, just mark the alphas.
    data, label = Tool.loadDataSet('../Data/testSetRBF.txt')
    b, alphas = SMO.smo(data, label, kernel=True, maxIter=100)
    svInd = np.nonzero(alphas.A > 0)[0]
    Tool.drawDataset(data, label, line=False, alphas=alphas)
Пример #22
0
 def setUp(self):
     """Build and train an SMO model on a single positive sample."""
     features = np.array([[1, 1]])
     labels = np.array([1])
     self.SMOobj = SMO(features, labels, 0.1, 0.001, 0.001)
     self.SMOobj.SMO_main()
Пример #23
0
    i.pop(0)
    tmp = []
    for j in range(784):
        tmp.append(0)
    for j in i:
        plz = j.find(':')
        tmp[int(j[0:plz])] = int(j[plz + 1:]) * r
    dataset.append(tmp)

dataset = np.array(dataset)
label = np.array(label)

# ytmp = label.reshape(-1, 1) * 1.
# xtmp = ytmp * dataset

# Train; SMO presumably returns (alphas, samples, labels, bias) --
# TODO confirm against the SMO implementation.
alpha, x, y, b = SMO(dataset, label, 10)
# BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float() performs the same conversion.
b = float(b)
# Primal weights: omega = sum_i alpha_i * y_i * x_i
temp = alpha * y
temp = np.transpose(temp)
omega = np.dot(temp, x)

# omega = np.transpose(omega)

# Load the test split (same sparse "index:value" line format as above);
# a context manager guarantees the file handle is closed.
with open('test-01-images.svm', mode='r') as datafile:
    tedata = datafile.readlines()
testdata = []
for i in tedata:
    testdata.append(i.replace('\n', '').split(' ', i.count(' ')))
dataset = []
label = []
Пример #24
0
 def setUp(self):
     X = np.array([[-1,-1], [1,1]])
     Y = np.array([-1, 1])
     self.SMOobj = SMO(X, Y, 0.1, 0.001, 0.001)