def test24():
    '''Demo figure: show how sample-point placement affects surrogate fitting
    of a 1-D function (sin(x) on [0, 4*pi]).

    First fits a polynomial through 5 uniformly spaced points
    (NOTE(review): degree 7 with only 5 points is under-determined — numpy
    emits a RankWarning and returns a least-squares solution), then fits a
    Kriging surrogate through pseudo-Monte-Carlo samples; each fit is
    plotted against the true curve.  Relies on module-level `np` and `plt`
    plus the project-local DOE and Kriging modules.'''
    from DOE import PseudoMonteCarlo


    # True function and a dense grid for plotting the exact curve.
    f = lambda x:np.sin(x)
    end = 4*np.pi
    line_x = np.linspace(0,end,100)
    line_y = f(line_x)

    # Five uniformly spaced sample points: 0, pi, 2*pi, 3*pi, 4*pi.
    pt_x = np.arange(0,end+1,np.pi)
    pt_y = f(pt_x)

    # Polynomial fit through the uniform samples.
    coeff = np.polyfit(pt_x,pt_y,7)
    polyFunc = np.poly1d(coeff)
    fit_y = polyFunc(line_x)
    
    
    # Figure 1: true curve, uniform samples, polynomial fit.
    plt.plot(line_x,line_y)
    plt.scatter(pt_x,pt_y)
    plt.plot(line_x,fit_y)
    plt.show()

    # Four pseudo-Monte-Carlo samples in [0.5*pi, 3.5*pi] plus both endpoints.
    pt_x = PseudoMonteCarlo(np.array([4]),1,[0.5*np.pi],[3.5*np.pi]).realSamples.reshape((-1))
    pt_x = np.append(pt_x,[0,end])

    
    pt_y = f(pt_x)

    # The Kriging interface expects column vectors.
    pt_x = pt_x.reshape((-1,1))
    pt_y = pt_y.reshape((-1,1))

    from Kriging import Kriging

    kriging = Kriging()
    kriging.fit(pt_x,pt_y,min=[0],max=[end])
    # Evaluate the surrogate pointwise along the plotting grid.
    fit_y = np.zeros_like(line_y)
    for i in range(line_x.shape[0]):
        fit_y[i] = kriging.get_Y(np.array([line_x[i]]))


    # Alternative: fit a*sin(x+b)+c via scipy curve_fit (kept for reference).
    # def func(x,a,b,c):
    #     return a*np.sin(x+b)+c
    # from scipy.optimize import curve_fit
    # popt,pcov = curve_fit(func,pt_x,pt_y)
    # a = popt[0]
    # b = popt[1]
    # c = popt[2]
    # fit_y = func(line_x,a,b,c)

    # Figure 2: true curve, Monte-Carlo samples, Kriging fit.
    plt.plot(line_x,line_y)
    plt.scatter(pt_x,pt_y)
    plt.plot(line_x,fit_y)
    plt.show()
    def GenerateAuxiliarySample(self,auxiliarySampleNum = 10):
        '''Densify the initial design with variance-maximizing points.

        Loads the initial samples and their responses from the log
        directory, fits a Kriging surrogate with a fixed theta vector,
        then repeatedly adds the point of maximum predictive variance.
        New responses are faked as the Kriging prediction plus a small
        random perturbation (re-running the real solver for every new
        point would be too expensive, per the original author's note).

        input : \n
        auxiliarySampleNum : int, number of auxiliary sample points\n'''

        samples = np.loadtxt(self.logPath+'/InitSamples.txt')
        value = np.loadtxt(self.logPath+'/初始样本集计算结果/Polytropic_efficiency.txt')
        # Build the response surface with a pre-optimized theta vector.
        kriging = Kriging()
        theta = [0.77412787, 1.07789896, 0.12595146,1.46702414]
        kriging.fit(samples, value, self.l, self.u,theta)

        # print('正在优化theta参数...')
        # theta = kriging.optimize(10000,self.logPath+'/theta优化种群数据.txt')

        for k in range(auxiliarySampleNum):
            print('第%d次加点...'%(k+1))
            nextSample = kriging.nextPoint_Varience()
            samples = np.vstack([samples,nextSample])
            # Fake the response: prediction plus noise in {-0.01, 0, +0.01}.
            # BUGFIX: np.random.randint's upper bound is exclusive, so the
            # original randint(-1, 1) drew only from {-1, 0} and the
            # perturbation was never positive.
            nextValue = kriging.get_Y(nextSample)+np.random.randint(-1,2)*0.01
            value = np.append(value,nextValue)
            kriging.fit(samples, value, self.l, self.u, theta)

        np.savetxt(self.logPath+'/A_Samples.txt',samples,fmt='%.18f')
    def Step_C(self):
        '''Constrained EGO step: alternate Kriging infill (EI + variance)
        with SVM retraining to locate the constrained optimum.

        Reads B_Samples.txt (per row: coordinates, value, feasibility mark)
        from self.logPath, iteratively adds samples, and writes the full
        sample set back to self.logPath.  Relies on the project classes
        Kriging, ADE, SVM_SKLearn (sklearn.svm), filterSamples, and on
        self.f providing dim, l, u, aim, isOK, report.'''
        # Penalty coefficient for constraint violation.
        # It must be large enough to bridge the magnitude gap between the
        # EI function and y.
        penalty = 10000000000000

        # Support vector machine used as the feasibility classifier.
        svm = SVM_SKLearn.SVC(C=1000, kernel='rbf', gamma=0.0005)

        # Extract sampled coordinates, values and feasibility marks.
        testFunc = self.f

        data = np.loadtxt(self.logPath + '/B_Samples.txt')
        samples = data[:, 0:testFunc.dim]
        value = data[:, testFunc.dim]
        mark = data[:, testFunc.dim + 1]

        print('训练初始支持向量机...')
        svm.fit(samples, mark)
        self.f.report(svm, self.testData)

        # Build the response surface with a fixed, pre-optimized theta.
        kriging = Kriging()

        theta = [
            0.0487903230, 0.0500304674, 0.0643049375, 0.001, 0.001,
            0.0118521980, 0.001, 0.001, 0.001, 0.874052978, 1.1136854,
            0.885431989, 0.001
        ]
        kriging.fit(samples, value, self.f.l, self.f.u, theta)

        # print('正在优化theta参数....')
        # kriging.fit(samples, value, self.f.l, self.f.u)
        # theta = kriging.optimize(10000,self.logPath+'/ADE_theta.txt')

        # Kriging prediction penalized where the SVM deems x infeasible
        # (decision_function < 0); used to find the feasible optimum.
        def kriging_optimum(x):
            y = kriging.get_Y(x)
            penaltyItem = penalty * min(0, svm.decision_function([x])[0])
            return y - penaltyItem

        # Kriging's own global_optimum only finds the unconstrained
        # optimum, hence this penalized search.
        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum, True)
        opt_ind = ade.evolution(maxGen=5000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)
        print('最优值的实际判定结果%.4f' % testFunc.isOK(opt_ind.x))
        print('最优值的SVM判定结果%.4f' % svm.decision_function([opt_ind.x]))

        # Objective combining the EI function with the constraint penalty.
        def EI_optimum(x):
            ei = kriging.EI(x)
            penaltyItem = penalty * min(0, svm.decision_function([x])[0])
            return ei + penaltyItem

        # Predictive standard deviation combined with the same penalty.
        def Varience_optimum(x):
            s = kriging.get_S(x)
            penaltyItem = penalty * min(0, svm.decision_function([x])[0])
            return s + penaltyItem

        iterNum = 100  # maximum number of infill iterations
        maxEI_threshold = 0.0001
        smallestDistance = 0.01

        for k in range(iterNum):
            print('\n第%d轮加点.........' % k)
            # Each round adds the EI maximizer and the variance maximizer.

            print('搜索EI函数在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, EI_optimum, False)
            opt_ind = ade.evolution(maxGen=5000)
            nextSample = opt_ind.x
            maxEI = EI_optimum(opt_ind.x)

            # A negative optimum means the penalized EI search landed in
            # the infeasible region; retry until it succeeds.
            while maxEI < 0:
                print('EI函数最优值求解失败,重新求解...')
                ade = ADE(self.f.l, self.f.u, 200, 0.5, EI_optimum, False)
                opt_ind = ade.evolution(maxGen=5000)
                nextSample = opt_ind.x
                maxEI = EI_optimum(opt_ind.x)
            print('EI函数最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            print('搜索方差在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, Varience_optimum, False)
            opt_ind = ade.evolution(5000, 0.8)
            nextSample = np.vstack((nextSample, opt_ind.x))
            print('方差最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            # If candidates are too close to existing samples, keep only
            # the sufficiently distant ones.
            nextSample = filterSamples(nextSample, samples, smallestDistance)

            # Termination checks.

            # MaxEI below the threshold: no global improvement possible.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止' % maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f' % maxEI)

            # Zero new points: every candidate was too close to old ones.
            if nextSample.shape[0] == 0:
                print('新加点的数目为0 ,计算终止')
                break
            else:
                print('本轮加点数目%d' % nextSample.shape[0])

            # Evaluate the new samples' true values and feasibility marks.
            nextSampleNum = nextSample.shape[0]
            nextValue = np.zeros(nextSampleNum)
            nextFuncMark = np.zeros(nextSampleNum)
            for i in range(nextSampleNum):
                nextValue[i] = testFunc.aim(nextSample[i, :])
                nextFuncMark[i] = testFunc.isOK(nextSample[i, :])

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            # Retraining only on SVM misclassification would raise
            # precision but hurt recall; retrain every round so the
            # feasible region around new samples can grow.
            print('训练支持向量机...')
            svm.fit(samples, mark)
            self.f.report(svm, self.testData)

            kriging.fit(samples, value, self.f.l, self.f.u, theta)

            print('搜索kriging模型在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)
            print('最优值的实际判定结果%.4f' % testFunc.isOK(kriging.optimumLocation))

            Data = np.hstack((samples, value.reshape(
                (-1, 1)), mark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        # If the reported optimum is actually infeasible, add it as a
        # sample, retrain, and search again until it becomes feasible.
        while testFunc.isOK(kriging.optimumLocation) == -1:

            nextSample = kriging.optimumLocation
            nextValue = testFunc.aim(nextSample)
            nextFuncMark = testFunc.isOK(nextSample)

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            print('区间错误,训练支持向量机...')
            svm.fit(samples, mark)
            self.f.report(svm, self.testData)

            print('搜索kriging模型在约束区间的最优值.....')
            kriging.fit(samples, value, self.f.l, self.f.u, theta)
            ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)
            print('最优值的实际判定结果%.4f' % testFunc.isOK(kriging.optimumLocation))

            Data = np.hstack((samples, value.reshape(
                (-1, 1)), mark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        print('全局最优值:', kriging.optimum)
        print('全局最优值坐标:', kriging.optimumLocation)
    def Step_A(self, initSampleNum=100, auxiliarySampleNum=10):
        '''Initial exploration of the design space.

        Latin-hypercube sample the domain, fit a Kriging surrogate and
        optimize theta, then add variance-maximizing points; keep adding
        until at least one feasible sample exists.  Samples (with value
        and feasibility mark) are written to self.logPath/A_Samples.txt.

        input : \n
        initSampleNum : int, number of initial samples\n
        auxiliarySampleNum : int, number of auxiliary (infill) samples\n'''

        # Generate the initial sample plan.
        lh = LatinHypercube(self.f.dim, initSampleNum, self.f.l, self.f.u)
        samples = lh.realSamples
        np.savetxt(self.logPath + '/InitSamples.txt', samples, delimiter=',')

        # samples = np.loadtxt(self.logPath+'/InitSamples.txt',delimiter=',')

        value = np.zeros(initSampleNum)
        for i in range(initSampleNum):
            value[i] = self.f.aim(samples[i, :])

        # Build the response surface.
        kriging = Kriging()
        kriging.fit(samples, value, self.f.l, self.f.u)

        print('正在优化theta参数...')
        theta = kriging.optimize(10000, self.logPath + '/theta优化种群数据.txt')
        # theta = [0.04594392,0.001,0.48417354,0.001,0.02740766]

        # Variance-maximizing infill to reduce global uncertainty.
        for k in range(auxiliarySampleNum):
            print('第%d次加点...' % (k + 1))
            nextSample = kriging.nextPoint_Varience()
            samples = np.vstack([samples, nextSample])
            value = np.append(value, self.f.aim(nextSample))
            kriging.fit(samples, value, self.f.l, self.f.u, theta)
            # kriging.optimize(100)

        # Check whether any feasible sample exists; if not, keep adding.
        mark = np.zeros(samples.shape[0])
        for i in range(samples.shape[0]):
            mark[i] = self.f.isOK(samples[i, :])

        if np.sum(mark == 1) > 0:
            value = value.reshape((-1, 1))
            mark = mark.reshape((-1, 1))
            storeData = np.hstack((samples, value, mark))
            np.savetxt(self.logPath + '/A_Samples.txt', storeData)
            return
        else:
            print('在所有样本中未能发现可行域,继续加点...')

        # Keep adding points until the most recently added one is feasible.
        i = 0
        while mark[-1] == -1:
            i += 1
            print('第%d次加点...' % (auxiliarySampleNum + i))
            nextSample = kriging.nextPoint_Varience()
            samples = np.vstack([samples, nextSample])
            value = np.append(value, self.f.aim(nextSample))
            mark = np.append(mark, self.f.isOK(nextSample))
            kriging.fit(samples, value, self.f.l, self.f.u, theta)
            # kriging.optimize(100)

        value = value.reshape((-1, 1))
        mark = mark.reshape((-1, 1))
        storeData = np.hstack((samples, value, mark))
        np.savetxt(self.logPath + '/A_Samples.txt', storeData)
# Empirical-semivariogram search parameters.
# NOTE(review): this fragment references names defined elsewhere
# (x, y, grades, Azimuth, test, pearsonr, Kriging) — it is not
# self-contained as shown here.
Azi_Tolerance=180   # azimuth tolerance in degrees
Lag_Distance=10     # lag spacing for the semivariogram
Lag_Tolerance=5     # lag tolerance
metric="Euclidean"  # distance metric name passed to the model

#Variogram modeling after semivariogram Calculation
sill = 95000
nugget = 38000
maxRange = 30 #range of variogram model where maximum variance is achieved.
variogramType = 'spherical' #variogram type

#For prediction: origin, cell size and extent of the estimation grid
originX = 1
originY = 1  
cellsizeX = 1
cellsizeY =1
xMax = 260
yMax = 300
neighborhood_radius = 30  # search radius for neighboring samples

# Build the Kriging model from the sample coordinates/grades and
# the variogram + grid parameters above, then predict over the grid.
model = Kriging(x,y,grades,
                 originX,originY,cellsizeX,cellsizeY,xMax,yMax,
                 neighborhood_radius,
                 variogramType, sill, nugget, maxRange,
                 Azimuth,Azi_Tolerance,Lag_Distance,Lag_Tolerance,metric)

predictions = model.predict()

# Pearson correlation between held-out values and the predictions.
corr = pearsonr(test['v'], predictions)
print(corr)
Esempio n. 6
0
    def Step_C(self):
        '''Constrained EGO step (custom-SVM variant).

        Alternates EI- and variance-based infill on a Kriging surrogate,
        penalizing points the SVM classifies as infeasible, until the EI
        optimum drops below a threshold; then keeps resampling the
        reported optimum while it actually violates the constraints.
        Uses the project's own SVM class (resumed from a saved state) and
        TestFunction_G4, and maintains both a working sample set and the
        full archive written to self.logPath/全部样本点.txt.'''
        # Penalty coefficient for constraint violation.
        # It must be large enough to bridge the magnitude gap between the
        # EI function and y.
        penalty = 10000000000000

        # Feasibility classifier: project SVM with a Gaussian kernel.
        # (A polynomial kernel is defined here but unused.)
        Kernal_Gau = lambda x, y: np.exp((-np.linalg.norm(x - y)**2) / 90)
        Kernal_Poly = lambda x, y: (np.dot(x, y) + 1)**9
        svm = SVM(1000,
                  kernal=Kernal_Gau,
                  path=self.logPath,
                  fileName='SVM_Step_C.txt')

        # Extract sampled coordinates, values and feasibility marks.
        testFunc = TestFunction_G4()

        # data = np.loadtxt(self.logPath+'/B_Samples.txt')
        data = np.loadtxt(self.logPath + '/全部样本点.txt')
        allSamples = data[:, 0:testFunc.dim]
        allValue = data[:, testFunc.dim]
        allMark = data[:, testFunc.dim + 1]

        print('训练初始支持向量机...')
        # svm.fit(allSamples,allMark,30000,maxAcc=1.1)
        # test28(svm)
        # Resume SVM training from its saved state instead of refitting.
        svm.retrain(self.logPath + '/SVM_Step_C.txt', maxIter=10, maxAcc=1.1)

        # # Space reduction: keep a hypercube 1.1x the feasible region to
        # # avoid computational redundancy from too many points
        # space_min,space_max = FeasibleSpace(svm,[12,6,9,9,9],0.1)

        # # The SVM is deliberately under-trained to avoid overfitting, so
        # # positives can be misclassified as negatives and the SVM's
        # # positive region may not fully contain the feasible set; union
        # # it with the bounding box of the sampled positives

        # allSamples_pos = allSamples[allMark==1]
        # add_space_min = np.min(allSamples_pos,0)
        # add_space_max = np.max(allSamples_pos,0)
        # add_space_max = (add_space_max-add_space_min)*0.1+add_space_max
        # add_space_min = add_space_min-(add_space_max-add_space_min)*0.1
        # for i in range(testFunc.dim):
        #     if space_min[i]>add_space_min[i]:
        #         space_min[i] = add_space_min[i]
        #     if space_max[i]<add_space_max[i]:
        #         space_max[i] = add_space_max[i]

        # # Intersect with the function's own value domain
        # for i in range(testFunc.dim):
        #     if space_min[i]<testFunc.min[i]:
        #         space_min[i] = testFunc.min[i]
        #     if space_max[i]>testFunc.max[i]:
        #         space_max[i] = testFunc.max[i]

        # Search the test function's full domain.
        space_min = testFunc.min
        space_max = testFunc.max
        # Drop samples that fall outside the search region.

        l = []
        for i in range(allSamples.shape[0]):
            for j in range(testFunc.dim):
                if allSamples[i, j] < space_min[j] or allSamples[
                        i, j] > space_max[j]:
                    l.append(i)
                    break

        samples = np.delete(allSamples, l, axis=0)
        value = np.delete(allValue, l)
        mark = np.delete(allMark, l)

        # Build the response surface with a fixed, pre-optimized theta.
        kriging = Kriging()

        theta = [0.279323019, 0.001, 3.15045620, 0.001, 0.179147511]
        kriging.fit(samples, value, space_min, space_max, theta)

        # print('正在优化theta参数....')
        # kriging.fit(samples, value, space_min, space_max)
        # theta = kriging.optimize(10000,self.logPath+'/ADE_theta.txt')

        # Kriging prediction penalized where the SVM deems x infeasible
        # (svm.transform(x) < 0); used to find the feasible optimum.
        def kriging_optimum(x):
            y = kriging.get_Y(x)
            penaltyItem = penalty * min(0, svm.transform(x))
            return y - penaltyItem

        # Kriging's global_optimum only finds the unconstrained optimum,
        # hence this penalized search.
        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 200, 0.5, kriging_optimum, True)
        opt_ind = ade.evolution(maxGen=5000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)
        print('SVM对最优值的判定结果%.4f' % svm.transform(kriging.optimumLocation))

        # testX = [78,34.6102252,30.98470067,29.68978243,28.85514208]
        # print('SVM对最优值的判定结果%.4f'%svm.transform(testX))
        # kriging.optimumLocation = testX
        # kriging.optimum = kriging.get_Y(testX)

        # Objective combining the EI function with the constraint penalty.
        def EI_optimum(x):
            ei = kriging.EI(x)
            penaltyItem = penalty * min(0, svm.transform(x))
            return ei + penaltyItem

        # Predictive standard deviation combined with the same penalty.
        def Varience_optimum(x):
            s = kriging.get_S(x)
            penaltyItem = penalty * min(0, svm.transform(x))
            return s + penaltyItem

        iterNum = 100  # maximum number of infill iterations
        maxEI_threshold = 0.0001
        smallestDistance = 0.01

        for k in range(iterNum):
            print('\n第%d轮加点.........' % k)
            # Each round adds the EI maximizer and the variance maximizer.

            print('搜索EI函数在约束区间的最优值.....')
            ade = ADE(space_min, space_max, 200, 0.5, EI_optimum, False)
            opt_ind = ade.evolution(maxGen=5000)
            nextSample = opt_ind.x
            maxEI = EI_optimum(opt_ind.x)

            # A negative optimum means the penalized EI search landed in
            # the infeasible region; retry until it succeeds.
            while maxEI < 0:
                print('EI函数最优值求解失败,重新求解...')
                ade = ADE(space_min, space_max, 200, 0.5, EI_optimum, False)
                opt_ind = ade.evolution(maxGen=5000)
                nextSample = opt_ind.x
                maxEI = EI_optimum(opt_ind.x)
            print('EI函数最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            print('搜索方差在约束区间的最优值.....')
            ade = ADE(space_min, space_max, 200, 0.5, Varience_optimum, False)
            opt_ind = ade.evolution(5000, 0.8)
            nextSample = np.vstack((nextSample, opt_ind.x))
            print('方差最优值实际约束判定:%d' % testFunc.isOK(opt_ind.x))

            # Keep only candidates far enough from existing samples.
            nextSample = filterSamples(nextSample, samples, smallestDistance)

            # Termination checks.

            # MaxEI below the threshold: no global improvement possible.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止' % maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f' % maxEI)

            # Zero new points: every candidate was too close to old ones.
            if nextSample.shape[0] == 0:
                print('新加点的数目为0 ,计算终止')
                break
            else:
                print('本轮加点数目%d' % nextSample.shape[0])

            # Evaluate the new samples' true values, feasibility marks,
            # and the SVM's verdict on each of them.
            nextSampleNum = nextSample.shape[0]
            nextValue = np.zeros(nextSampleNum)
            nextFuncMark = np.zeros(nextSampleNum)
            nextSVMMark = np.zeros(nextSampleNum)
            for i in range(nextSampleNum):
                nextValue[i] = testFunc.aim(nextSample[i, :])
                nextFuncMark[i] = testFunc.isOK(nextSample[i, :])
                nextSVMMark[i] = svm.transform(nextSample[i, :])

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            allSamples = np.vstack((allSamples, nextSample))
            allValue = np.append(allValue, nextValue)
            allMark = np.append(allMark, nextFuncMark)

            # Retraining only on SVM misclassification would raise
            # precision but hurt recall; retrain every round so the
            # feasible region around new samples can grow.
            print('训练支持向量机...')
            svm.fit(samples, mark, 30000, maxAcc=1.1)
            test28(svm)

            # for i in range(nextSampleNum):
            #     if (nextFuncMark[i] == -1 and nextSVMMark[i] > 0) or (nextFuncMark[i] == 1 and nextSVMMark[i] < 0):
            #         print('新采样点的计算结果与SVM判定不符,重新训练SVM模型.......')
            #         svm.fit(samples,mark,30000,maxAcc=1.1)
            #         test28(svm)
            #         break

            #     if nextFuncMark[i] == -1 and nextSVMMark[i] < 0:
            #         print('新采样点位于违反约束区域,惩罚系数乘2')
            #         penalty *= 1.1

            kriging.fit(samples, value, space_min, space_max, theta)

            print('搜索kriging模型在约束区间的最优值.....')
            ade = ADE(space_min, space_max, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)

            Data = np.hstack((allSamples, allValue.reshape(
                (-1, 1)), allMark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        # If the reported optimum is actually infeasible, add it as a
        # sample, retrain, and search again until it becomes feasible.
        while testFunc.isOK(kriging.optimumLocation) == -1:

            nextSample = kriging.optimumLocation
            nextValue = testFunc.aim(nextSample)
            nextFuncMark = testFunc.isOK(nextSample)

            samples = np.vstack((samples, nextSample))
            value = np.append(value, nextValue)
            mark = np.append(mark, nextFuncMark)

            allSamples = np.vstack((allSamples, nextSample))
            allValue = np.append(allValue, nextValue)
            allMark = np.append(allMark, nextFuncMark)

            print('区间错误,训练支持向量机...')
            svm.fit(samples, mark, 30000, maxAcc=1.1)
            test28(svm)

            print('搜索kriging模型在约束区间的最优值.....')
            kriging.fit(samples, value, space_min, space_max, theta)
            ade = ADE(space_min, space_max, 200, 0.5, kriging_optimum, True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)

            Data = np.hstack((allSamples, allValue.reshape(
                (-1, 1)), allMark.reshape((-1, 1))))
            np.savetxt(self.logPath + '/全部样本点.txt', Data, delimiter='\t')

        print('全局最优值:', kriging.optimum)
        print('全局最优值坐标:', kriging.optimumLocation)
Esempio n. 7
0
def stepC_1():
    '''First version of step C: constrained EGO on TestFunction_G8 (2-D).

    Resumes a saved SVM feasibility classifier, shrinks the search space
    to a hypercube around the feasible region, then alternates EI- and
    variance-based infill on a Kriging surrogate with an SVM penalty.
    Each round dumps the surrogate's prediction/variance/EI grids and the
    current sample set under root_path for visualization.'''
    # Penalty coefficient for constraint violation.
    penalty = 10000
    root_path = './Data/约束优化算法测试1/stepC_5'
    import os
    if not os.path.exists(root_path):
        os.makedirs(root_path)

    # Resume the saved SVM feasibility classifier.
    svm = SVM(5, kernal=Kernal_Polynomial, path=root_path)
    svm.retrain(root_path + '/SVM_Argument_2019-01-15_11-41-00.txt', maxIter=1)

    # Recover coordinates, values and feasibility marks of the SVM's
    # training samples by re-evaluating the test function.
    allSamples = svm.x
    allValue = np.zeros(allSamples.shape[0])
    allMark = np.zeros(allSamples.shape[0])
    testFunc = TestFunction_G8()
    for i in range(allSamples.shape[0]):
        allValue[i] = testFunc.aim(allSamples[i, :])
        allMark[i] = testFunc.isOK(allSamples[i, :])

    # Space reduction: keep a hypercube 1.1x the feasible region to avoid
    # computational redundancy from too many points.
    space_min, space_max = FeasibleSpace(svm, 0.1, [100, 100])
    x, y = np.mgrid[space_min[0]:space_max[0]:100j,
                    space_min[1]:space_max[1]:100j]

    # Drop samples that fall outside the search region.

    l = []
    for i in range(allSamples.shape[0]):
        if allSamples[i,0]<space_min[0] or allSamples[i,1]<space_min[1] \
        or allSamples[i,0]>space_max[0] or allSamples[i,1]>space_max[1]:
            l.append(i)

    samples = np.delete(allSamples, l, axis=0)
    value = np.delete(allValue, l)
    mark = np.delete(allMark, l)

    # Build the response surface and optimize its theta parameters.
    kriging = Kriging()
    # theta = [32.22662213, 18.59361027]
    # kriging.fit(samples, value, space_min, space_max, theta)

    print('正在优化theta参数....')
    kriging.fit(samples, value, space_min, space_max)
    theta = kriging.optimize(10000, root_path + '/ADE_theta.txt')

    # Kriging prediction penalized where the SVM deems x infeasible.
    def kriging_optimum(x):
        y = kriging.get_Y(x)
        penaltyItem = penalty * min(0, svm.transform(x))
        return y - penaltyItem

    print('搜索kriging模型在约束区间的最优值.....')
    ade = ADE(space_min, space_max, 100, 0.5, kriging_optimum, True)
    opt_ind = ade.evolution(maxGen=100000)
    kriging.optimumLocation = opt_ind.x
    kriging.optimum = kriging.get_Y(opt_ind.x)

    # Objective combining the EI function with the constraint penalty.
    def EI_optimum(x):
        ei = kriging.EI(x)
        penaltyItem = penalty * min(0, svm.transform(x))
        return ei + penaltyItem

    # Predictive standard deviation combined with the same penalty.
    def Varience_optimum(x):
        s = kriging.get_S(x)
        penaltyItem = penalty * min(0, svm.transform(x))
        return s + penaltyItem

    iterNum = 100  # maximum number of infill rounds
    preValue = np.zeros_like(x)
    EI_Value = np.zeros_like(x)
    varience = np.zeros_like(x)

    maxEI_threshold = 0.0001
    optimum_threshold = 0.0001
    smallestDistance = 0.01
    lastOptimum = None

    for k in range(iterNum):
        # Evaluate the surrogate over the whole grid for visualization;
        # cells the SVM deems infeasible are zeroed out.
        print('正在遍历响应面...')
        for i in range(0, x.shape[0]):
            for j in range(0, x.shape[1]):
                a = [x[i, j], y[i, j]]
                if (svm.transform(a) < 0):
                    preValue[i, j] = 0
                    varience[i, j] = 0
                    EI_Value[i, j] = 0
                else:
                    preValue[i, j] = kriging.get_Y(a)
                    varience[i, j] = kriging.get_S(a)
                    EI_Value[i, j] = kriging.EI(a)

        path1 = root_path + '/Kriging_Predicte_Model_%d.txt' % k
        writeFile([x, y, preValue], [samples, value], path1)
        path2 = root_path + '/Kriging_Varience_Model_%d.txt' % k
        writeFile([x, y, varience], [samples, value], path2)
        path3 = root_path + '/Kriging_EI_Model_%d.txt' % k
        writeFile([x, y, EI_Value], [samples, value], path3)

        print('\n第%d轮加点.........' % k)
        # Each round adds the variance maximizer and the EI maximizer.

        # Adding the optimum itself is discouraged: it sits too close to
        # existing sample points.
        # nextSample = kriging.optimumLocation

        print('搜索EI函数在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 100, 0.5, EI_optimum, False)
        opt_ind = ade.evolution(maxGen=100000)
        nextSample = opt_ind.x
        # nextSample = np.vstack((nextSample,opt_ind.x))
        maxEI = EI_optimum(opt_ind.x)

        print('搜索方差在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 100, 0.5, Varience_optimum, False)
        opt_ind = ade.evolution(10000, 0.8)
        nextSample = np.vstack((nextSample, opt_ind.x))

        # Keep only candidates far enough from existing samples.
        nextSample = filterSamples(nextSample, samples, smallestDistance)

        # Termination checks (skipped on the very first round).
        if k == 0:
            lastOptimum = kriging.optimum
        else:
            # MaxEI below the threshold: no global improvement possible.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止' % maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f' % maxEI)

            # Terminating on optimum improvement stops far too early,
            # hence this block stays disabled.
            # # When the optimum's change over two rounds is below the threshold
            # if abs(lastOptimum-kriging.optimum) < optimum_threshold:
            #     print('kriging模型全局最优值的提升小于%.5f,计算终止'%optimum_threshold)
            #     break
            # else:
            #     print('kriging模型全局最优值的提升%.5f'%(abs(lastOptimum-kriging.optimum)))
            #     lastOptimum = kriging.optimum

            # Zero new points: every candidate was too close to old ones.
            if nextSample.shape[0] == 0:
                print('新加点的数目为0 ,计算终止')
                break
            else:
                print('本轮加点数目%d' % nextSample.shape[0])

        # Evaluate the new samples and compare the SVM verdict with the
        # truth: on a mismatch, retrain the SVM; if a sample is truly
        # infeasible and the SVM agrees, inflate the penalty.
        nextSampleNum = nextSample.shape[0]
        nextValue = np.zeros(nextSampleNum)
        nextFuncMark = np.zeros(nextSampleNum)
        nextSVMMark = np.zeros(nextSampleNum)
        for i in range(nextSampleNum):
            nextValue[i] = testFunc.aim(nextSample[i, :])
            nextFuncMark[i] = testFunc.isOK(nextSample[i, :])
            nextSVMMark[i] = svm.transform(nextSample[i, :])

        samples = np.vstack((samples, nextSample))
        value = np.append(value, nextValue)
        mark = np.append(mark, nextFuncMark)

        allSamples = np.vstack((allSamples, nextSample))
        allValue = np.append(allValue, nextValue)
        allMark = np.append(allMark, nextFuncMark)

        for i in range(nextSampleNum):
            if (nextFuncMark[i] == -1
                    and nextSVMMark[i] > 0) or (nextFuncMark[i] == 1
                                                and nextSVMMark[i] < 0):
                print('新采样点的计算结果与SVM判定不符,重新训练SVM模型.......')
                svm.fit(samples, mark, 500000)
                svm.show()

                # Re-shrinking the search region repeatedly is discouraged:
                # the SVM needs negative samples outside the boundary to
                # pin down the hyperplane, and Kriging needs them to judge
                # the objective near the boundary.  Fewer points, yes, but
                # prediction outside the constraint would degrade.

                # # Set the search region
                # space_min,space_max = FeasibleSpace(svm,0.1)
                # x, y = np.mgrid[space_min[0]:space_max[0]:100j, space_min[1]:space_max[1]:100j]

                # # Drop samples outside the search region

                # l = []
                # for i in range(allSamples.shape[0]):
                #     if allSamples[i,0]<space_min[0] or allSamples[i,1]<space_min[1] \
                #     or allSamples[i,0]>space_max[0] or allSamples[i,1]>space_max[1]:
                #         l.append(i)

                # samples = np.delete(allSamples,l,axis=0)
                # value = np.delete(allValue,l)
                # mark = np.delete(allMark,l)
                break

            if nextFuncMark[i] == -1 and nextSVMMark[i] < 0:
                print('新采样点位于违反约束区域,惩罚系数乘2')
                penalty *= 1.1

        print('正在优化theta参数....')
        kriging.fit(samples, value, space_min, space_max)
        theta = kriging.optimize(10000, root_path + '/ADE_theta.txt')

        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(space_min, space_max, 100, 0.5, kriging_optimum, True)
        opt_ind = ade.evolution(maxGen=100000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)

        Data = np.hstack((samples, value.reshape((-1, 1)), mark.reshape(
            (-1, 1))))
        np.savetxt(root_path + '/优化结果.txt', Data, delimiter='\t')

    print('全局最优值:', kriging.optimum)
    print('全局最优值坐标:', kriging.optimumLocation)
Esempio n. 8
0
def stepA_1():
    '''
    First version of step A: add a fixed number of infill points, using
    predictive variance as the criterion to reduce global uncertainty;
    then keep adding until a feasible sample appears.

    Side effects: reads the initial sample set from disk and writes the
    Kriging prediction/variance/true-model grids plus the final sample
    set under ./Data/约束优化算法测试1/.
    '''
    f = TestFunction_G8()
    # Renamed from `min`/`max`: the originals shadowed the builtins.
    lower = f.min
    upper = f.max

    # Evaluate the true function on a 100x100 grid of the design space.
    x, y = np.mgrid[lower[0]:upper[0]:100j, lower[1]:upper[1]:100j]
    s = np.zeros_like(x)
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            a = [x[i, j], y[i, j]]
            s[i, j] = f.aim(a)

    # Load the previously generated initial sample set.
    sampleNum = 20
    # lh=LatinHypercube(2,sampleNum,lower,upper)
    # realSample=lh.realSamples
    # np.savetxt('./Data/约束优化算法测试1/realSample.txt',realSample,delimiter=',')

    realSample = np.loadtxt('./Data/约束优化算法测试1/realSample.txt', delimiter=',')

    value = np.zeros(sampleNum)
    for i in range(0, sampleNum):
        a = [realSample[i, 0], realSample[i, 1]]
        value[i] = f.aim(a)

    # Build the response surface and optimize its theta parameters.
    kriging = Kriging()
    kriging.fit(realSample, value, lower, upper)

    print('正在优化theta参数....')
    theta = kriging.optimize(1000, './Data/约束优化算法测试1/ADE_theta.txt')
    # Grids for the surrogate's prediction and variance.
    preValue = np.zeros_like(x)
    varience = np.zeros_like(x)

    print('正在遍历响应面...')
    for i in range(0, x.shape[0]):
        for j in range(0, x.shape[1]):
            a = [x[i, j], y[i, j]]
            preValue[i, j], varience[i, j] = kriging.transform(np.array(a))
    print('正在保存输出文件...')
    path = './Data/约束优化算法测试1/Kriging_Predicte_Model.txt'
    writeFile([x, y, preValue], [realSample, value], path)
    path = './Data/约束优化算法测试1/Kriging_Varience_Model.txt'
    writeFile([x, y, varience], [realSample, value], path)
    path = './Data/约束优化算法测试1/Kriging_True_Model.txt'
    writeFile([x, y, s], [realSample, value], path)

    iterNum = 10  # number of infill points
    for k in range(iterNum):
        print('第%d次加点' % (k + 1))
        nextSample = kriging.nextPoint_Varience()
        realSample = np.vstack([realSample, nextSample])
        value = np.append(value, f.aim(nextSample))
        kriging.fit(realSample, value, lower, upper, theta)
        # kriging.optimize(100)

        # Re-evaluate the surrogate over the grid after each infill.
        print('正在遍历响应面...')
        for i in range(0, x.shape[0]):
            for j in range(0, x.shape[1]):
                a = [x[i, j], y[i, j]]
                preValue[i, j], varience[i, j] = kriging.transform(np.array(a))

        path = './Data/约束优化算法测试1/Kriging_Predicte_Model_%d.txt' % k
        writeFile([x, y, preValue], [realSample, value], path)
        path = './Data/约束优化算法测试1/Kriging_Varience_Model_%d.txt' % k
        writeFile([x, y, varience], [realSample, value], path)

    # If no feasible sample exists yet, keep adding points.
    mark = np.zeros(realSample.shape[0])
    for i in range(realSample.shape[0]):
        mark[i] = f.isOK(realSample[i, :])

    if np.sum(mark == 1) > 0:
        value = value.reshape((-1, 1))
        mark = mark.reshape((-1, 1))
        storeData = np.hstack((realSample, value, mark))
        np.savetxt('./Data/约束优化算法测试1/samples1.txt', storeData)
        return

    # BUGFIX: the original reused `i` both as the extra-round counter and
    # as the grid row index in the traversal loop below, so after the
    # first round the counter jumped to x.shape[0]-1 and both the progress
    # message and the output-file numbering (`iterNum + i`) were wrong.
    # The round counter now has its own name.
    extra = 0
    while mark[-1] == -1:
        extra += 1
        print('第%d次加点' % (iterNum + extra))
        nextSample = kriging.nextPoint_Varience()
        realSample = np.vstack([realSample, nextSample])
        value = np.append(value, f.aim(nextSample))
        mark = np.append(mark, f.isOK(nextSample))
        kriging.fit(realSample, value, lower, upper, theta)
        # kriging.optimize(100)

        # Re-evaluate the surrogate over the grid after each infill.
        print('正在遍历响应面...')
        for i in range(0, x.shape[0]):
            for j in range(0, x.shape[1]):
                a = [x[i, j], y[i, j]]
                preValue[i, j], varience[i, j] = kriging.transform(np.array(a))

        path = './Data/约束优化算法测试1/Kriging_Predicte_Model_%d.txt' % (iterNum + extra)
        writeFile([x, y, preValue], [realSample, value], path)
        path = './Data/约束优化算法测试1/Kriging_Varience_Model_%d.txt' % (iterNum + extra)
        writeFile([x, y, varience], [realSample, value], path)

    value = value.reshape((-1, 1))
    mark = mark.reshape((-1, 1))
    storeData = np.hstack((realSample, value, mark))
    np.savetxt('./Data/约束优化算法测试1/samples1.txt', storeData)