def test25():
    """Exercise the G4 test function: spot-check three probe points, run a
    penalised ADE search for the constrained optimum, then exhaustively
    evaluate the integer lattice of the domain and dump value/feasibility
    pairs to disk."""
    from Main import TestFunction_G4

    f = TestFunction_G4()

    # One reference point and two slight perturbations of it.
    probes = (
        np.array([78, 33, 29.996, 45, 36.7758]),
        np.array([78, 33, 30.04292047, 45, 36.65603025]),
        np.array([78, 33, 29.99790876, 45, 36.76916756]),
    )
    for probe in probes:
        print(f.isOK(probe))
        print(f.aim(probe))

    # Penalised objective: a large penalty is added whenever isOK is negative.
    func = lambda x: f.aim(x) - 100000 * np.min([f.isOK(x), 0])
    from ADE import ADE
    ade = ADE(f.min, f.max, 100, 0.5, func, True)
    opt_ind = ade.evolution(maxGen=100000)
    print(opt_ind.x)

    # Total number of integer lattice points in the box [min, max].
    pointNum = 1
    for width in (f.max[j] - f.min[j] + 1 for j in range(f.dim)):
        pointNum *= width

    value = np.zeros(pointNum)
    mark = np.zeros(pointNum)
    for idx in range(pointNum):
        # Decode idx as mixed-radix digits, one digit per dimension.
        point = f.min.copy()
        rest = idx
        for j in range(f.dim):
            base = f.max[j] - f.min[j] + 1
            point[j] += rest % base
            rest = rest // base
            if rest == 0:
                # Remaining digits are all zero; stop early.
                break
        value[idx] = f.aim(point)
        mark[idx] = f.isOK(point)

    data = np.hstack((np.reshape(value, (-1, 1)), np.reshape(mark, (-1, 1))))
    np.savetxt('./Data/G4函数测试/data.txt', data)

    posNum = np.sum(mark == 1)
    negNum = np.sum(mark == -1)
    print(posNum, negNum)
    def Step_C(self):
        """Adaptive sampling step C (sklearn-SVC variant).

        Couples an SVM constraint classifier with a Kriging surrogate:
        iteratively adds the EI maximiser and the variance maximiser as new
        samples, retrains both models each round, and finally keeps adding
        the predicted optimum until it is actually feasible.

        NOTE(review): relies on self.f, self.testData, self.logPath and on
        Kriging / ADE / SVM_SKLearn / filterSamples / np from the enclosing
        module — confirm availability against the full file.
        """
        # Penalty coefficient for constraint violation.
        # It must be large enough to bridge the magnitude gap between the
        # EI function and the objective value y.
        penalty = 10000000000000

        # Build the support vector machine (RBF kernel).
        svm = SVM_SKLearn.SVC(C=1000, kernel='rbf', gamma=0.0005)

        # Extract the sampled points' coordinates, objective values and
        # constraint-violation flags from the step-B log file.
        testFunc = self.f

        data = np.loadtxt(self.logPath + '/B_Samples.txt')
        samples = data[:, 0:testFunc.dim]
        value = data[:, testFunc.dim]
        mark = data[:, testFunc.dim + 1]

        print('训练初始支持向量机...')
        svm.fit(samples, mark)
        self.f.report(svm, self.testData)

        # Build the response surface.
        kriging = Kriging()

        # Pre-tuned theta hyper-parameters (the optimisation that produced
        # them is kept commented out below).
        theta = [
            0.0487903230, 0.0500304674, 0.0643049375, 0.001, 0.001,
            0.0118521980, 0.001, 0.001, 0.001, 0.874052978, 1.1136854,
            0.885431989, 0.001
        ]
        kriging.fit(samples, value, self.f.l, self.f.u, theta)

        # print('正在优化theta参数....')
        # kriging.fit(samples, value, self.f.l, self.f.u)
        # theta = kriging.optimize(10000,self.logPath+'/ADE_theta.txt')

        # Search the kriging model's optimum within the feasible region:
        # the SVM decision function acts as the constraint proxy, and the
        # penalty term drives the search away from predicted-infeasible x.
        def kriging_optimum(x):
            y = kriging.get_Y(x)
            penaltyItem = penalty * min(0, svm.decision_function([x])[0])
            return y - penaltyItem

        # kriging's global_optimum can only find the global optimum, not the
        # feasible-region optimum, hence the penalised ADE search here.
        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum, True)
        opt_ind = ade.evolution(maxGen=5000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)
        print('最优值的实际判定结果%.4f' % testFunc.isOK(opt_ind.x))
        print('最优值的SVM判定结果%.4f' % svm.decision_function([opt_ind.x]))

        # Objective combining the EI function with the constraint penalty.
        def EI_optimum(x):
            ei = kriging.EI(x)
            penaltyItem = penalty * min(0, svm.decision_function([x])[0])
            return ei + penaltyItem

        # Same combination for the prediction variance (exploration term).
        def Varience_optimum(x):
            s = kriging.get_S(x)
            penaltyItem = penalty * min(0, svm.decision_function([x])[0])
            return s + penaltyItem

        iterNum = 100  # number of refinement iterations
        maxEI_threshold = 0.0001
        smallestDistance = 0.01

        for k in range(iterNum):
            print('\n第%d轮加点.........' % k)
            # Each round adds the EI maximiser and the variance maximiser.

            print('搜索EI函数在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, EI_optimum, False)
            opt_ind = ade.evolution(maxGen=5000)
            nextSample = opt_ind.x
            maxEI = EI_optimum(opt_ind.x)

            # A negative maxEI means the penalised search landed in the
            # infeasible region — retry until a feasible candidate is found.
            while maxEI < 0:
                print('EI函数最优值求解失败,重新求解...')
                ade = ADE(self.f.l, self.f.u, 200, 0.5, EI_optimum, False)
                opt_ind = ade.evolution(maxGen=5000)
                nextSample = opt_ind.x
                maxEI = EI_optimum(opt_ind.x)
            print('EI函数最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            print('搜索方差在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, Varience_optimum, False)
            opt_ind = ade.evolution(5000, 0.8)
            nextSample = np.vstack((nextSample, opt_ind.x))
            print('方差最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            # If the candidate points are too close to existing samples
            # (or to each other), keep only the sufficiently distant ones.
            nextSample = filterSamples(nextSample, samples, smallestDistance)

            # Termination checks.

            # When maxEI drops below the threshold there is no global
            # improvement potential left.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止' % maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f' % maxEI)

            # If no points survived filtering, the new points were too close
            # to existing ones — stop.
            if nextSample.shape[0] == 0:
                print('新加点的数目为0 ,计算终止')
                break
            else:
                print('本轮加点数目%d' % nextSample.shape[0])

            # Evaluate the new samples: check whether they actually satisfy
            # the constraints (the SVM verdict is implicitly re-checked by
            # retraining below).
            nextSampleNum = nextSample.shape[0]
            nextValue = np.zeros(nextSampleNum)
            nextFuncMark = np.zeros(nextSampleNum)
            for i in range(nextSampleNum):
                nextValue[i] = testFunc.aim(nextSample[i, :])
                nextFuncMark[i] = testFunc.isOK(nextSample[i, :])

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            # Retraining only when the SVM is found wrong tends to improve
            # precision but not recall; retraining every round (also when
            # the optimum is feasible) grows the nearby feasible region.
            print('训练支持向量机...')
            svm.fit(samples, mark)
            self.f.report(svm, self.testData)

            kriging.fit(samples, value, self.f.l, self.f.u, theta)

            print('搜索kriging模型在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)
            print('最优值的实际判定结果%.4f' % testFunc.isOK(kriging.optimumLocation))

            # Persist all samples after every round so a crash loses nothing.
            Data = np.hstack((samples, value.reshape(
                (-1, 1)), mark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        # If the predicted optimum is actually infeasible, add it as a new
        # sample, retrain both models, and search again — repeat until the
        # predicted optimum passes the true constraint check.
        while testFunc.isOK(kriging.optimumLocation) == -1:

            nextSample = kriging.optimumLocation
            nextValue = testFunc.aim(nextSample)
            nextFuncMark = testFunc.isOK(nextSample)

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            print('区间错误,训练支持向量机...')
            svm.fit(samples, mark)
            self.f.report(svm, self.testData)

            print('搜索kriging模型在约束区间的最优值.....')
            kriging.fit(samples, value, self.f.l, self.f.u, theta)
            ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)
            print('最优值的实际判定结果%.4f' % testFunc.isOK(kriging.optimumLocation))

            Data = np.hstack((samples, value.reshape(
                (-1, 1)), mark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        print('全局最优值:', kriging.optimum)
        print('全局最优值坐标:', kriging.optimumLocation)
# 示例#3 (Example #3 — paste-site separator; kept as a comment so the file parses)
# 0 (vote-count artifact from the original paste)
    def Step_C(self):
        """Adaptive sampling step C (custom-SVM variant).

        Retrains a hand-rolled SVM from a logged argument file, fits a
        Kriging surrogate on the samples inside the search box, then
        iteratively adds the EI and variance optima until the predicted
        optimum is feasible.

        NOTE(review): relies on self.logPath and on SVM / TestFunction_G4 /
        Kriging / ADE / filterSamples / test28 / np from the enclosing
        module — confirm availability against the full file.
        """
        # Penalty coefficient for constraint violation.
        # It must be large enough to bridge the magnitude gap between the
        # EI function and the objective value y.
        penalty = 10000000000000

        # Load the support vector machine. The Gaussian kernel is used;
        # the polynomial kernel is defined but unused here.
        Kernal_Gau = lambda x, y: np.exp((-np.linalg.norm(x - y)**2) / 90)
        Kernal_Poly = lambda x, y: (np.dot(x, y) + 1)**9
        svm = SVM(1000,
                  kernal=Kernal_Gau,
                  path=self.logPath,
                  fileName='SVM_Step_C.txt')

        # Extract the sampled points' coordinates, values and
        # constraint-violation flags.
        testFunc = TestFunction_G4()

        # data = np.loadtxt(self.logPath+'/B_Samples.txt')
        data = np.loadtxt(self.logPath + '/全部样本点.txt')
        allSamples = data[:, 0:testFunc.dim]
        allValue = data[:, testFunc.dim]
        allMark = data[:, testFunc.dim + 1]

        print('训练初始支持向量机...')
        # svm.fit(allSamples,allMark,30000,maxAcc=1.1)
        # test28(svm)
        svm.retrain(self.logPath + '/SVM_Step_C.txt', maxIter=10, maxAcc=1.1)

        # # Space reduction: keep a hyper-cube 1.1x the feasible region to
        # # avoid computational redundancy from too many points.
        # space_min,space_max = FeasibleSpace(svm,[12,6,9,9,9],0.1)

        # # To avoid over-fitting the SVM is not trained to convergence, so
        # # positives can be misjudged as negatives: the SVM positive region
        # # need not contain the whole feasible region. Take the union with
        # # the bounding box of the currently-sampled positives.

        # allSamples_pos = allSamples[allMark==1]
        # add_space_min = np.min(allSamples_pos,0)
        # add_space_max = np.max(allSamples_pos,0)
        # add_space_max = (add_space_max-add_space_min)*0.1+add_space_max
        # add_space_min = add_space_min-(add_space_max-add_space_min)*0.1
        # for i in range(testFunc.dim):
        #     if space_min[i]>add_space_min[i]:
        #         space_min[i] = add_space_min[i]
        #     if space_max[i]<add_space_max[i]:
        #         space_max[i] = add_space_max[i]

        # # Intersect with the function's own domain.
        # for i in range(testFunc.dim):
        #     if space_min[i]<testFunc.min[i]:
        #         space_min[i] = testFunc.min[i]
        #     if space_max[i]>testFunc.max[i]:
        #         space_max[i] = testFunc.max[i]

        # Search box: the full domain of the test function.
        space_min = testFunc.min
        space_max = testFunc.max
        # Drop sample points that fall outside the search region.

        l = []
        for i in range(allSamples.shape[0]):
            for j in range(testFunc.dim):
                if allSamples[i, j] < space_min[j] or allSamples[
                        i, j] > space_max[j]:
                    l.append(i)
                    break

        samples = np.delete(allSamples, l, axis=0)
        value = np.delete(allValue, l)
        mark = np.delete(allMark, l)

        # Build the response surface.
        kriging = Kriging()

        # Pre-tuned theta hyper-parameters (optimisation commented out below).
        theta = [0.279323019, 0.001, 3.15045620, 0.001, 0.179147511]
        kriging.fit(samples, value, space_min, space_max, theta)

        # print('正在优化theta参数....')
        # kriging.fit(samples, value, space_min, space_max)
        # theta = kriging.optimize(10000,self.logPath+'/ADE_theta.txt')

        # Search the kriging model's optimum within the feasible region;
        # svm.transform(x) plays the role of a signed constraint proxy.
        def kriging_optimum(x):
            y = kriging.get_Y(x)
            penaltyItem = penalty * min(0, svm.transform(x))
            return y - penaltyItem

        # kriging's global_optimum can only find the global optimum, not the
        # feasible-region optimum, hence the penalised ADE search here.
        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 200, 0.5, kriging_optimum, True)
        opt_ind = ade.evolution(maxGen=5000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)
        print('SVM对最优值的判定结果%.4f' % svm.transform(kriging.optimumLocation))

        # testX = [78,34.6102252,30.98470067,29.68978243,28.85514208]
        # print('SVM对最优值的判定结果%.4f'%svm.transform(testX))
        # kriging.optimumLocation = testX
        # kriging.optimum = kriging.get_Y(testX)

        # Objective combining the EI function with the constraint penalty.
        def EI_optimum(x):
            ei = kriging.EI(x)
            penaltyItem = penalty * min(0, svm.transform(x))
            return ei + penaltyItem

        # Same combination for the prediction variance (exploration term).
        def Varience_optimum(x):
            s = kriging.get_S(x)
            penaltyItem = penalty * min(0, svm.transform(x))
            return s + penaltyItem

        iterNum = 100  # number of refinement iterations
        maxEI_threshold = 0.0001
        smallestDistance = 0.01

        for k in range(iterNum):
            print('\n第%d轮加点.........' % k)
            # Each round adds the EI maximiser and the variance maximiser.

            print('搜索EI函数在约束区间的最优值.....')
            ade = ADE(space_min, space_max, 200, 0.5, EI_optimum, False)
            opt_ind = ade.evolution(maxGen=5000)
            nextSample = opt_ind.x
            maxEI = EI_optimum(opt_ind.x)

            # A negative maxEI means the penalised search landed in the
            # infeasible region — retry until a feasible candidate is found.
            while maxEI < 0:
                print('EI函数最优值求解失败,重新求解...')
                ade = ADE(space_min, space_max, 200, 0.5, EI_optimum, False)
                opt_ind = ade.evolution(maxGen=5000)
                nextSample = opt_ind.x
                maxEI = EI_optimum(opt_ind.x)
            print('EI函数最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            print('搜索方差在约束区间的最优值.....')
            ade = ADE(space_min, space_max, 200, 0.5, Varience_optimum, False)
            opt_ind = ade.evolution(5000, 0.8)
            nextSample = np.vstack((nextSample, opt_ind.x))
            print('方差最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            # If the candidate points are too close to existing samples,
            # keep only the sufficiently distant ones.
            nextSample = filterSamples(nextSample, samples, smallestDistance)

            # Termination checks.

            # When maxEI drops below the threshold there is no global
            # improvement potential left.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止' % maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f' % maxEI)

            # If no points survived filtering, the new points were too close
            # to existing ones — stop.
            if nextSample.shape[0] == 0:
                print('新加点的数目为0 ,计算终止')
                break
            else:
                print('本轮加点数目%d' % nextSample.shape[0])

            # Evaluate the new samples: true constraint verdicts and SVM
            # verdicts (the SVM-mismatch handling is commented out below).
            nextSampleNum = nextSample.shape[0]
            nextValue = np.zeros(nextSampleNum)
            nextFuncMark = np.zeros(nextSampleNum)
            nextSVMMark = np.zeros(nextSampleNum)
            for i in range(nextSampleNum):
                nextValue[i] = testFunc.aim(nextSample[i, :])
                nextFuncMark[i] = testFunc.isOK(nextSample[i, :])
                nextSVMMark[i] = svm.transform(nextSample[i, :])

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            # Also grow the unfiltered master set used for persistence.
            allSamples = np.vstack((allSamples, nextSample))
            allValue = np.append(allValue, nextValue)
            allMark = np.append(allMark, nextFuncMark)

            # Retraining only when the SVM is found wrong tends to improve
            # precision but not recall; retraining every round (also when
            # the optimum is feasible) grows the nearby feasible region.
            print('训练支持向量机...')
            svm.fit(samples, mark, 30000, maxAcc=1.1)
            test28(svm)

            # for i in range(nextSampleNum):
            #     if (nextFuncMark[i] == -1 and nextSVMMark[i] > 0) or (nextFuncMark[i] == 1 and nextSVMMark[i] < 0):
            #         print('新采样点的计算结果与SVM判定不符,重新训练SVM模型.......')
            #         svm.fit(samples,mark,30000,maxAcc=1.1)
            #         test28(svm)
            #         break

            #     if nextFuncMark[i] == -1 and nextSVMMark[i] < 0:
            #         print('新采样点位于违反约束区域,惩罚系数乘2')
            #         penalty *= 1.1

            kriging.fit(samples, value, space_min, space_max, theta)

            print('搜索kriging模型在约束区间的最优值.....')
            ade = ADE(space_min, space_max, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)

            # Persist the full sample set after every round.
            Data = np.hstack((allSamples, allValue.reshape(
                (-1, 1)), allMark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        # If the predicted optimum is actually infeasible, add it as a new
        # sample, retrain both models, and search again — repeat until the
        # predicted optimum passes the true constraint check.
        while testFunc.isOK(kriging.optimumLocation) == -1:

            nextSample = kriging.optimumLocation
            nextValue = testFunc.aim(nextSample)
            nextFuncMark = testFunc.isOK(nextSample)

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            allSamples = np.vstack((allSamples, nextSample))
            allValue = np.append(allValue, nextValue)
            allMark = np.append(allMark, nextFuncMark)

            print('区间错误,训练支持向量机...')
            svm.fit(samples, mark, 30000, maxAcc=1.1)
            test28(svm)

            print('搜索kriging模型在约束区间的最优值.....')
            kriging.fit(samples, value, space_min, space_max, theta)
            ade = ADE(space_min, space_max, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)

            Data = np.hstack((allSamples, allValue.reshape(
                (-1, 1)), allMark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        print('全局最优值:', kriging.optimum)
        print('全局最优值坐标:', kriging.optimumLocation)
# 示例#4 (Example #4 — paste-site separator; kept as a comment so the file parses)
# 0 (vote-count artifact from the original paste)
def stepC_1():
    """First version of step C (2-D, G8 test function).

    Rebuilds an SVM constraint classifier from a saved argument file, fits
    a Kriging surrogate over a reduced feasible-space box, and iteratively
    adds EI/variance optima. Each round it also sweeps the 100x100 grid to
    dump the predicted surface, variance and EI landscapes for plotting.

    NOTE(review): relies on SVM / Kernal_Polynomial / TestFunction_G8 /
    FeasibleSpace / Kriging / ADE / filterSamples / writeFile / np from the
    enclosing module — confirm availability against the full file.
    """
    # Penalty coefficient for constraint violation.
    penalty = 10000
    root_path = './Data/约束优化算法测试1/stepC_5'
    import os
    if not os.path.exists(root_path):
        os.makedirs(root_path)

    # Load the support vector machine from a saved argument file.
    svm = SVM(5, kernal=Kernal_Polynomial, path=root_path)
    svm.retrain(root_path + '/SVM_Argument_2019-01-15_11-41-00.txt', maxIter=1)

    # Extract the sampled points' coordinates, values and
    # constraint-violation flags by re-evaluating the test function.
    allSamples = svm.x
    allValue = np.zeros(allSamples.shape[0])
    allMark = np.zeros(allSamples.shape[0])
    testFunc = TestFunction_G8()
    for i in range(allSamples.shape[0]):
        allValue[i] = testFunc.aim(allSamples[i, :])
        allMark[i] = testFunc.isOK(allSamples[i, :])

    # Space reduction: keep a hyper-cube slightly larger than the feasible
    # region to avoid redundant computation from too many points.
    space_min, space_max = FeasibleSpace(svm, 0.1, [100, 100])
    x, y = np.mgrid[space_min[0]:space_max[0]:100j,
                    space_min[1]:space_max[1]:100j]

    # Drop sample points that fall outside the search region.

    l = []
    for i in range(allSamples.shape[0]):
        if allSamples[i,0]<space_min[0] or allSamples[i,1]<space_min[1] \
        or allSamples[i,0]>space_max[0] or allSamples[i,1]>space_max[1]:
            l.append(i)

    samples = np.delete(allSamples, l, axis=0)
    value = np.delete(allValue, l)
    mark = np.delete(allMark, l)

    # Build the response surface.
    kriging = Kriging()
    # theta = [32.22662213, 18.59361027]
    # kriging.fit(samples, value, space_min, space_max, theta)

    print('正在优化theta参数....')
    kriging.fit(samples, value, space_min, space_max)
    theta = kriging.optimize(10000, root_path + '/ADE_theta.txt')

    # Search the kriging model's optimum within the feasible region;
    # svm.transform(x) plays the role of a signed constraint proxy.
    def kriging_optimum(x):
        y = kriging.get_Y(x)
        penaltyItem = penalty * min(0, svm.transform(x))
        return y - penaltyItem

    print('搜索kriging模型在约束区间的最优值.....')
    ade = ADE(space_min, space_max, 100, 0.5, kriging_optimum, True)
    opt_ind = ade.evolution(maxGen=100000)
    kriging.optimumLocation = opt_ind.x
    kriging.optimum = kriging.get_Y(opt_ind.x)

    # Objective combining the EI function with the constraint penalty.
    def EI_optimum(x):
        ei = kriging.EI(x)
        penaltyItem = penalty * min(0, svm.transform(x))
        return ei + penaltyItem

    # Same combination for the prediction variance (exploration term).
    def Varience_optimum(x):
        s = kriging.get_S(x)
        penaltyItem = penalty * min(0, svm.transform(x))
        return s + penaltyItem

    iterNum = 100  # number of point-adding rounds
    preValue = np.zeros_like(x)
    EI_Value = np.zeros_like(x)
    varience = np.zeros_like(x)

    maxEI_threshold = 0.0001
    optimum_threshold = 0.0001
    smallestDistance = 0.01
    lastOptimum = None

    for k in range(iterNum):
        # Sweep the response surface over the grid; infeasible cells
        # (SVM-negative) are zeroed out in all three landscapes.
        print('正在遍历响应面...')
        for i in range(0, x.shape[0]):
            for j in range(0, x.shape[1]):
                a = [x[i, j], y[i, j]]
                if (svm.transform(a) < 0):
                    preValue[i, j] = 0
                    varience[i, j] = 0
                    EI_Value[i, j] = 0
                else:
                    preValue[i, j] = kriging.get_Y(a)
                    varience[i, j] = kriging.get_S(a)
                    EI_Value[i, j] = kriging.EI(a)

        # Dump the three landscapes for external plotting.
        path1 = root_path + '/Kriging_Predicte_Model_%d.txt' % k
        writeFile([x, y, preValue], [samples, value], path1)
        path2 = root_path + '/Kriging_Varience_Model_%d.txt' % k
        writeFile([x, y, varience], [samples, value], path2)
        path3 = root_path + '/Kriging_EI_Model_%d.txt' % k
        writeFile([x, y, EI_Value], [samples, value], path3)

        print('\n第%d轮加点.........' % k)
        # Each round adds the variance maximiser and the EI maximiser.

        # Adding the predicted optimum itself is not recommended: it lies
        # too close to the existing sample points.
        # nextSample = kriging.optimumLocation

        print('搜索EI函数在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 100, 0.5, EI_optimum, False)
        opt_ind = ade.evolution(maxGen=100000)
        nextSample = opt_ind.x
        # nextSample = np.vstack((nextSample,opt_ind.x))
        maxEI = EI_optimum(opt_ind.x)

        print('搜索方差在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 100, 0.5, Varience_optimum, False)
        opt_ind = ade.evolution(10000, 0.8)
        nextSample = np.vstack((nextSample, opt_ind.x))

        # If the candidate points are too close to existing samples,
        # keep only the sufficiently distant ones.
        nextSample = filterSamples(nextSample, samples, smallestDistance)

        # Termination checks (skipped on the very first round).
        if k == 0:
            lastOptimum = kriging.optimum
        else:
            # When maxEI drops below the threshold there is no global
            # improvement potential left.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止' % maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f' % maxEI)

            # Using optimum improvement as the stopping criterion makes the
            # algorithm terminate too early, hence it is disabled.
            # # 当全局最优值两轮变化小于最优值门限
            # if abs(lastOptimum-kriging.optimum) < optimum_threshold:
            #     print('kriging模型全局最优值的提升小于%.5f,计算终止'%optimum_threshold)
            #     break
            # else:
            #     print('kriging模型全局最优值的提升%.5f'%(abs(lastOptimum-kriging.optimum)))
            #     lastOptimum = kriging.optimum

            # If no points survived filtering, the new points were too
            # close to existing ones — stop.
            if nextSample.shape[0] == 0:
                print('新加点的数目为0 ,计算终止')
                break
            else:
                print('本轮加点数目%d' % nextSample.shape[0])

        # Evaluate the new samples: check true constraint satisfaction and
        # the SVM verdict. If the SVM misjudged, retrain it; if the SVM was
        # right but the sample is infeasible, scale up the penalty.
        nextSampleNum = nextSample.shape[0]
        nextValue = np.zeros(nextSampleNum)
        nextFuncMark = np.zeros(nextSampleNum)
        nextSVMMark = np.zeros(nextSampleNum)
        for i in range(nextSampleNum):
            nextValue[i] = testFunc.aim(nextSample[i, :])
            nextFuncMark[i] = testFunc.isOK(nextSample[i, :])
            nextSVMMark[i] = svm.transform(nextSample[i, :])

        samples = np.vstack((samples, nextSample))
        value = np.append(value, nextValue)
        mark = np.append(mark, nextFuncMark)

        # Also grow the unfiltered master set.
        allSamples = np.vstack((allSamples, nextSample))
        allValue = np.append(allValue, nextValue)
        allMark = np.append(allMark, nextFuncMark)

        for i in range(nextSampleNum):
            if (nextFuncMark[i] == -1
                    and nextSVMMark[i] > 0) or (nextFuncMark[i] == 1
                                                and nextSVMMark[i] < 0):
                print('新采样点的计算结果与SVM判定不符,重新训练SVM模型.......')
                svm.fit(samples, mark, 500000)
                svm.show()

                # Repeatedly shrinking the search region is discouraged: the
                # SVM needs outer negative samples to bound its hyperplane,
                # and the kriging model needs them to judge boundary values.
                # Shrinking does cut the point count, but too few samples
                # outside the constraint degrade later predictions.

                # # Re-set the search region.
                # space_min,space_max = FeasibleSpace(svm,0.1)
                # x, y = np.mgrid[space_min[0]:space_max[0]:100j, space_min[1]:space_max[1]:100j]

                # # Drop sample points outside the search region.

                # l = []
                # for i in range(allSamples.shape[0]):
                #     if allSamples[i,0]<space_min[0] or allSamples[i,1]<space_min[1] \
                #     or allSamples[i,0]>space_max[0] or allSamples[i,1]>space_max[1]:
                #         l.append(i)

                # samples = np.delete(allSamples,l,axis=0)
                # value = np.delete(allValue,l)
                # mark = np.delete(allMark,l)
                break

            # NOTE(review): message says "times 2" but the factor is 1.1.
            if nextFuncMark[i] == -1 and nextSVMMark[i] < 0:
                print('新采样点位于违反约束区域,惩罚系数乘2')
                penalty *= 1.1

        print('正在优化theta参数....')
        kriging.fit(samples, value, space_min, space_max)
        theta = kriging.optimize(10000, root_path + '/ADE_theta.txt')

        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 100, 0.5, kriging_optimum, True)
        opt_ind = ade.evolution(maxGen=100000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)

        # Persist the retained sample set after every round.
        Data = np.hstack((samples, value.reshape((-1, 1)), mark.reshape(
            (-1, 1))))
        np.savetxt(root_path + '/优化结果.txt', Data, delimiter='\t')

    print('全局最优值:', kriging.optimum)
    print('全局最优值坐标:', kriging.optimumLocation)