Example #1
    def __init__(self, dim):

        self.__Pop = []  # population set
        self.__PosPop = []  # positive sample set
        self.__Optimal = []  # the best sample so far
        self.__NextPop = []  # the next population set
        self.__region = []  # the region of model
        self.__label = []  # the random label, if true random in this dimension
        self.__SampleSize = 0  # the instance number of sampling in an iteration
        self.__MaxIteration = 0  # the number of iterations
        self.__Budget = 0  # budget of evaluation
        self.__PositiveNum = 0  # the set size of PosPop
        self.__RandProbability = 0  # the probability of sample in model
        self.__UncertainBits = 0  # the dimension size that is sampled randomly
        self.__OnlineSwitch = False
        self.__dimension = dim

        for i in range(dim.getSize()):
            reg = []
            reg.append(0)
            reg.append(0)
            self.__region.append(reg)
            self.__label.append(True)

        self.__ro = RandomOperator()
        return
Example #2
    def __init__(self, dimension):

        self.__pop = []  # population set
        self.__pos_pop = []  # positive sample set
        self.__optimal = None  # the best sample so far
        self.__region = []  # the region of model
        self.__label = []  # the random label, if true random in this dimension
        self.__sample_size = 0  # the instance size of sampling in an iteration
        self.__budget = 0  # budget of evaluation
        self.__positive_num = 0  # positive sample set size
        self.__rand_probability = 0.0  # the probability of sampling in model
        self.__uncertain_bit = 0  # the dimension size of sampling randomly
        self.__dimension = dimension

        # sampling saving
        self.__model_ins = []  # positive sample used to modeling
        self.__negative_set = []  # negative set used to modeling
        self.__new_ins = []  # new sample
        self.__sample_label = []  # the label of each sample

        for i in range(self.__dimension.get_size()):
            region = [0.0, 0.0]
            self.__region.append(region)
            self.__label.append(0)

        self.__ro = RandomOperator()
        return
Example #3
    def __init__(self, dim=None, bias_region=[-0.2, 0.2]):

        ro = RandomOperator()

        self.__dimension = dim
        # generate bias randomly
        self.__bias = []
        for i in range(self.__dimension.get_size()):
            # self.__bias.append(ro.getUniformDouble(self.__dimension.getRegion(i)[0], self.__dimension.getRegion(i)[1]))
            self.__bias.append(
                ro.get_uniform_double(bias_region[0], bias_region[1]))
        # print 'bias:', self.__bias

        return
Example #4
File: Racos.py  Project: eyounx/RACOS
    def __init__(self, dim):

        self.__Pop = []             # population set
        self.__PosPop = []          # positive sample set
        self.__Optimal = []         # the best sample so far
        self.__NextPop = []         # the next population set
        self.__region = []          # the region of model
        self.__label = []           # the random label, if true random in this dimension
        self.__SampleSize = 0       # the instance number of sampling in an iteration
        self.__MaxIteration = 0     # the number of iterations
        self.__Budget = 0           # budget of evaluation
        self.__PositiveNum = 0      # the set size of PosPop
        self.__RandProbability = 0  # the probability of sample in model
        self.__UncertainBits = 0    # the dimension size that is sampled randomly
        self.__OnlineSwitch = False
        self.__dimension = dim

        for i in range(dim.getSize()):
            reg = []
            reg.append(0)
            reg.append(0)
            self.__region.append(reg)
            self.__label.append(True)

        self.__ro = RandomOperator()
        return
Example #5
File: Racos.py  Project: Cloud2016/RACOS
    def __init__(self, dim):
        self.__dimension = dim
        self.__region = []  # the region of model
        self.__label = []  # the random label, if true random in this dimension

        for i in range(dim.getSize()):
            reg = []
            reg.append(0)
            reg.append(0)
            self.__region.append(reg)
            self.__label.append(True)

        self.__ro = RandomOperator()
        return
Example #6
File: Racos.py  Project: eyounx/RACOS
class RacosOptimization:

    def __init__(self, dim):

        self.__Pop = []             # population set
        self.__PosPop = []          # positive sample set
        self.__Optimal = []         # the best sample so far
        self.__NextPop = []         # the next population set
        self.__region = []          # the region of model
        self.__label = []           # the random label, if true random in this dimension
        self.__SampleSize = 0       # the instance number of sampling in an iteration
        self.__MaxIteration = 0     # the number of iterations
        self.__Budget = 0           # budget of evaluation
        self.__PositiveNum = 0      # the set size of PosPop
        self.__RandProbability = 0  # the probability of sample in model
        self.__UncertainBits = 0    # the dimension size that is sampled randomly
        self.__OnlineSwitch = False
        self.__dimension = dim

        for i in range(dim.getSize()):
            reg = []
            reg.append(0)
            reg.append(0)
            self.__region.append(reg)
            self.__label.append(True)

        self.__ro = RandomOperator()
        return


    def OnlineTurnOn(self):
        self.__OnlineSwitch = True

    def OnlineTurnOff(self):
        self.__OnlineSwitch = False

    def Clear(self):
        self.__Pop = []
        self.__PosPop = []
        self.__Optimal = []
        self.__NextPop = []
        return

    # Parameters setting
    def setParameters(self, ss, mt, pn, rp, ub):
        self.__SampleSize = ss
        if self.__OnlineSwitch is False:
            self.__MaxIteration = mt
        else:
            self.__Budget = mt
        self.__PositiveNum = pn
        self.__RandProbability = rp
        self.__UncertainBits = ub
        return
    # -------------------------------------------------------
    # some test function
    def ShowPop(self, fea):
        print '----Pop----'
        for i in range(self.__SampleSize):
            if fea is True:
                print self.__Pop[i].getFeatures(), ':', self.__Pop[i].getFitness()
            else:
                print 'fitness:', self.__Pop[i].getFitness()
        return

    def ShowNextPop(self, fea):
        print '----NextPop----'
        for i in range(self.__SampleSize):
            if fea is True:
                print self.__NextPop[i].getFeatures(), ':', self.__NextPop[i].getFitness()
            else:
                print 'fitness:', self.__NextPop[i].getFitness()
        return

    def ShowPosPop(self, fea):
        print '----PosPop----'
        for i in range(self.__PositiveNum):
            if fea is True:
                print self.__PosPop[i].getFeatures(), ':', self.__PosPop[i].getFitness()
            else:
                print 'fitness:', self.__PosPop[i].getFitness()
        return

    def ShowRegion(self):
        print '----Region----'
        for i in range(self.__dimension.getSize()):
            print 'dimension', i, '[', self.__region[i][0], ',', self.__region[i][1], ']'
        return

    def ShowLabel(self):
        print self.__label
        return
    # test function end
    # ----------------------------------------------------------------

    # Return optimal
    def getOptimal(self):
        return self.__Optimal


    # Generate an instance randomly
    def RandomInstance(self, dim, region, label):
        inst = Instance(dim)
        for i in range(dim.getSize()):
            if label[i] is True:
                if dim.getType(i) is True:
                    inst.setFeature(i, self.__ro.getUniformDouble(region[i][0], region[i][1]))
                else:
                    inst.setFeature(i, self.__ro.getUniformInteger(region[i][0], region[i][1]))
        return inst

    # generate an instance randomly
    def PosRandomInstance(self, dim, region, label, pos):
        ins = Instance(dim)
        for i in range(dim.getSize()):
            if label[i] is False:
                if dim.getType(i) is True:
                    ins.setFeature(i, self.__ro.getUniformDouble(region[i][0], region[i][1]))
                else:
                    ins.setFeature(i, self.__ro.getUniformInteger(region[i][0], region[i][1]))
            else:
                ins.setFeature(i, pos.getFeature(i))
        return ins

    # reset model
    def ResetModel(self):
        for i in range(self.__dimension.getSize()):
            self.__region[i][0] = self.__dimension.getRegion(i)[0]
            self.__region[i][1] = self.__dimension.getRegion(i)[1]
            self.__label[i] = True
        return

    # If an instance equal to ins already exists in the list, return True
    def InstanceInList(self, ins, list, end):
        for i in range(len(list)):
            if i == end:
                break
            if ins.Equal(list[i]) == True:
                return True
        return False

    # Initialize Pop, PosPop and Optimal
    def Initialize(self, func):
        temp = []

        # sample in original region under uniform distribution
        self.ResetModel()

        for i in range(self.__SampleSize+self.__PositiveNum):
            ins = []
            while(True):
                ins = self.RandomInstance(self.__dimension, self.__region, self.__label)
                if self.InstanceInList(ins, temp, i) is False:
                    break
            ins.setFitness(func(ins.getFeatures()))
            temp.append(ins)
        # sorted by fitness
        temp.sort(key=lambda instance: instance.getFitness())
        # initialize PosPop and Pop
        i = 0
        while(i<self.__PositiveNum):
            self.__PosPop.append(temp[i])
            i += 1
        while(i<self.__PositiveNum+self.__SampleSize):
            self.__Pop.append(temp[i])
            i += 1
        # initialize optimal
        self.__Optimal = self.__PosPop[0].CopyInstance()
        return

    # Generate the model used to sample the next instance
    def ContinueShrinkModel(self, ins):
        ins_left = self.__SampleSize
        while(ins_left > 0):
            ChosenNeg = self.__ro.getUniformInteger(0, ins_left-1)
            ChosenDim = self.__ro.getUniformInteger(0, self.__dimension.getSize()-1)
            #shrinking
            if (ins.getFeature(ChosenDim) < self.__Pop[ChosenNeg].getFeature(ChosenDim)):
                btemp = self.__ro.getUniformDouble(ins.getFeature(ChosenDim), self.__Pop[ChosenNeg].getFeature(ChosenDim))
                if(btemp < self.__region[ChosenDim][1]):
                    self.__region[ChosenDim][1] = btemp
                    i = 0
                    while(i < ins_left):
                        if self.__Pop[i].getFeature(ChosenDim) >= btemp:
                            ins_left = ins_left - 1
                            itemp = self.__Pop[i]
                            self.__Pop[i] = self.__Pop[ins_left]
                            self.__Pop[ins_left] = itemp
                        else:
                            i += 1
            else:
                btemp = self.__ro.getUniformDouble(self.__Pop[ChosenNeg].getFeature(ChosenDim), ins.getFeature(ChosenDim))
                if (btemp > self.__region[ChosenDim][0]):
                    self.__region[ChosenDim][0] = btemp
                    i = 0
                    while(i < ins_left):
                        if self.__Pop[i].getFeature(ChosenDim) <= btemp:
                            ins_left = ins_left - 1
                            itemp = self.__Pop[i]
                            self.__Pop[i] = self.__Pop[ins_left]
                            self.__Pop[ins_left] = itemp
                        else:
                            i += 1

        return

    # Set uncertain bits
    def setUncertainBits(self):
        temp = []
        for i in range(self.__dimension.getSize()):
            temp.append(i)
        for i in range(self.__UncertainBits):
            index = self.__ro.getUniformInteger(0, self.__dimension.getSize()-i-1)
            self.__label[temp[index]] = False
            temp.remove(temp[index])
        return

    # Update the PosPop list according to the newly generated Pop list
    def UpdatePosPop(self):
        for i in range(self.__SampleSize):
            j = 0
            while(j<self.__PositiveNum):
                if(self.__Pop[i].getFitness()<self.__PosPop[j].getFitness()):
                    break
                else:
                    j += 1
            if(j < self.__PositiveNum):
                temp = self.__Pop[i]
                self.__Pop[i] = self.__PosPop[self.__PositiveNum-1]
                k = self.__PositiveNum-1
                while(k > j):
                    self.__PosPop[k] = self.__PosPop[k-1]
                    k -= 1
                self.__PosPop[j] = temp
        return

    def OnlineUpdate(self, ins):
        j = 0
        while (j < self.__PositiveNum):
            if (ins.getFitness() < self.__PosPop[j].getFitness()):
                break
            else:
                j += 1
        if (j < self.__PositiveNum):
            temp = ins
            ins = self.__PosPop[self.__PositiveNum - 1]
            k = self.__PositiveNum - 1
            while (k > j):
                self.__PosPop[k] = self.__PosPop[k - 1]
                k -= 1
            self.__PosPop[j] = temp

        j = 0
        while (j < self.__SampleSize):
            if (ins.getFitness() < self.__Pop[j].getFitness()):
                break
            else:
                j += 1
        if (j < self.__SampleSize):
            temp = ins
            ins = self.__Pop[self.__SampleSize - 1]
            k = self.__SampleSize - 1
            while (k > j):
                self.__Pop[k] = self.__Pop[k - 1]
                k -= 1
            self.__Pop[j] = temp


    # Update Optimal
    def UpdateOptimal(self):
        if(self.__Optimal.getFitness() > self.__PosPop[0].getFitness()):
            self.__Optimal = self.__PosPop[0].CopyInstance()
        return

    # If instances in Pop list are not in model, return True
    def Distinguish(self):
        for i in range(self.__SampleSize):
            j = 0
            while(j < self.__dimension.getSize()):
                if (self.__Pop[i].getFeature(j) > self.__region[j][0]) and (self.__Pop[i].getFeature(j) < self.__region[j][1]):
                    j += 1
                else:
                    break
            if (j == self.__dimension.getSize()):
                return False
        return True

    '''
    Racos for continuous optimization
    param:
        func: objective function
        ss:   sample size
        mt:   maximum number of iterations (used as the evaluation budget in online mode)
        pn:   positive instance size
        rp:   the probability of sampling from the learned model
        ub:   uncertain bits
    '''
    def ContinueOpt(self, func, ss, mt, pn, rp, ub):

        self.Clear()
        self.setParameters(ss, mt, pn, rp, ub)
        self.ResetModel()
        self.Initialize(func)

        if self.__OnlineSwitch is False:
            # no online style
            for itera in range(self.__MaxIteration-1):

                self.__NextPop = []
                for sam in range(self.__SampleSize):
                    while(True):
                        self.ResetModel()
                        ChosenPos = self.__ro.getUniformInteger(0, self.__PositiveNum-1)
                        Gsample = self.__ro.getUniformDouble(0, 1)
                        if(Gsample <= self.__RandProbability):
                            self.ContinueShrinkModel(self.__PosPop[ChosenPos])
                            self.setUncertainBits()
                        ins = self.PosRandomInstance(self.__dimension, self.__region, self.__label, self.__PosPop[ChosenPos])
                        if((self.InstanceInList(ins, self.__PosPop, self.__PositiveNum) is False) and (self.InstanceInList(ins, self.__NextPop, sam) is False)):
                            ins.setFitness(func(ins.getFeatures()))
                            break
                    self.__NextPop.append(ins)
                self.__Pop = []
                for i in range(self.__SampleSize):
                    self.__Pop.append(self.__NextPop[i])

                self.UpdatePosPop()
                self.UpdateOptimal()
        else:
            #Online style
            BudCount = self.__SampleSize+self.__PositiveNum
            while BudCount < self.__Budget:
        #        print BudCount, self.__Optimal.getFitness()
                BudCount += 1
                while (True):
                    self.ResetModel()
                    ChosenPos = self.__ro.getUniformInteger(0, self.__PositiveNum - 1)
                    Gsample = self.__ro.getUniformDouble(0, 1)
                    if (Gsample <= self.__RandProbability):
                        self.ContinueShrinkModel(self.__PosPop[ChosenPos])
                        self.setUncertainBits()
                    ins = self.PosRandomInstance(self.__dimension, self.__region, self.__label,
                                                 self.__PosPop[ChosenPos])
                    if ((self.InstanceInList(ins, self.__PosPop, self.__PositiveNum) is False) and (
                        self.InstanceInList(ins, self.__Pop, self.__SampleSize) is False)):
                        ins.setFitness(func(ins.getFeatures()))
                        break

                self.OnlineUpdate(ins)
                self.UpdateOptimal()


        return

    # Distinguish function for discrete optimization
    def DiscreteDistinguish(self, ins, ChosenDim):

        if len(ChosenDim) == 0:
            return 0

        for i in range(self.__SampleSize):
            j = 0
            while j < len(ChosenDim):
                if ins.getFeature(ChosenDim[j]) != self.__Pop[i].getFeature(ChosenDim[j]):
                    break
                j = j+1
            if j == len(ChosenDim):
                return 0
        return 1

    # PosRandomInstance function for discrete optimization
    def PosRandomDiscreteInstance(self, PosIns, dim, label):
        ins = Instance(dim)

        for i in range(dim.getSize()):
            if label[i] is True:
                ins.setFeature(i, PosIns.getFeature(i))
            else:
                ins.setFeature(i, self.__ro.getUniformInteger(dim.getRegion(i)[0], dim.getRegion(i)[1]))
        return ins

    # ShrinkModel function for discrete optimization
    def DiscreteShrinkModel(self, ins, dim):


        NonChosenDim = []
        for i in range(dim.getSize()):
            NonChosenDim.append(i)
        ChosenDim = []

        while self.DiscreteDistinguish(ins, ChosenDim) == 0:
            tempDim = NonChosenDim[self.__ro.getUniformInteger(0, len(NonChosenDim)-1)]
            ChosenDim.append(tempDim)
            NonChosenDim.remove(tempDim)

        while len(NonChosenDim) > self.__UncertainBits:
            tempDim = NonChosenDim[self.__ro.getUniformInteger(0, len(NonChosenDim) - 1)]
            ChosenDim.append(tempDim)
            NonChosenDim.remove(tempDim)

        return NonChosenDim


    '''
    RACOS for discrete optimization
    param:
        func: objective function
        ss:   sample size
        mt:   maximum number of iterations (used as the evaluation budget in online mode)
        pn:   positive instance size
        rp:   the probability of sampling from the learned model
        ub:   uncertain bits
    '''
    def DiscreteOpt(self, func, ss, mt, pn, rp, ub):
        self.Clear()
        self.setParameters(ss, mt, pn, rp, ub)
        self.ResetModel()
        self.Initialize(func)

        if self.__OnlineSwitch is False:
            for itera in range(self.__MaxIteration - 1):

                self.__NextPop = []

                for sam in range(self.__SampleSize):
                    while (True):
                        self.ResetModel()
                        ChosenPos = self.__ro.getUniformInteger(0, self.__PositiveNum - 1)
                        Gsample = self.__ro.getUniformDouble(0, 1)
                        if (Gsample <= self.__RandProbability):
                            NonChosenDim = self.DiscreteShrinkModel(self.__PosPop[ChosenPos], self.__dimension)
                            for i in range(len(NonChosenDim)):
                                self.__label[NonChosenDim[i]] = False
                        ins = self.PosRandomDiscreteInstance(self.__PosPop[ChosenPos], self.__dimension, self.__label)
                        if ((self.InstanceInList(ins, self.__PosPop, self.__PositiveNum) is False) and (
                            self.InstanceInList(ins, self.__NextPop, sam) is False)):
                            ins.setFitness(func(ins.getFeatures()))
                            break
                    self.__NextPop.append(ins)
                self.__Pop = []
                for i in range(self.__SampleSize):
                    self.__Pop.append(self.__NextPop[i])

                self.UpdatePosPop()
                self.UpdateOptimal()
        else:
            BudCount = self.__SampleSize + self.__PositiveNum
            while BudCount < self.__Budget:
                print BudCount, self.__Optimal.getFitness()
                BudCount += 1
                while (True):
                    self.ResetModel()
                    ChosenPos = self.__ro.getUniformInteger(0, self.__PositiveNum - 1)
                    Gsample = self.__ro.getUniformDouble(0, 1)
                    if (Gsample <= self.__RandProbability):
                        NonChosenDim = self.DiscreteShrinkModel(self.__PosPop[ChosenPos], self.__dimension)
                        for i in range(len(NonChosenDim)):
                            self.__label[NonChosenDim[i]] = False
                    ins = self.PosRandomDiscreteInstance(self.__PosPop[ChosenPos], self.__dimension, self.__label)
                    if ((self.InstanceInList(ins, self.__PosPop, self.__PositiveNum) is False) and (
                                self.InstanceInList(ins, self.__Pop, self.__SampleSize) is False)):
                        ins.setFitness(func(ins.getFeatures()))
                        break

                self.OnlineUpdate(ins)
                self.UpdateOptimal()
        return

    # Distinguish function for mixed optimization
    def MixDistinguish(self, ins):
        for i in range(self.__SampleSize):
            j = 0
            while j < self.__dimension.getSize():
                if self.__dimension.getType(j) is True:
                    if self.__Pop[i].getFeature(j) < self.__region[j][0] or self.__Pop[i].getFeature(j) > self.__region[j][1]:
                        break
                else:
                    if self.__label[j] is False and ins.getFeature(j) != self.__Pop[i].getFeature(j):
                        break
                j += 1
            if j >= self.__dimension.getSize():
                return False
        return True

    # PosRandomInstance for mixed optimization
    def PosRandomMixInstance(self, PosIns, dim, regi, lab):
        ins = Instance(dim)
        for i in range(dim.getSize()):
            if lab[i] is False:
                ins.setFeature(i, PosIns.getFeature(i))
            else:
                if dim.getType(i) is True:  # continuous
                    ins.setFeature(i, self.__ro.getUniformDouble(regi[i][0], regi[i][1]))
                else: #discrete
                    ins.setFeature(i, self.__ro.getUniformInteger(dim.getRegion(i)[0], dim.getRegion(i)[1]))
        return ins

    # ShrinkModel function for mixed optimization
    def MixShrinkModel(self, ins):

        ChosenDim = []
        NonChosenDim = []
        for i in range(self.__dimension.getSize()):
            NonChosenDim.append(i)

        count = 0
        while self.MixDistinguish(ins) is False:
            TempDim = NonChosenDim[self.__ro.getUniformInteger(0, len(NonChosenDim)-1)]
            ChosenNeg = self.__ro.getUniformInteger(0, self.__SampleSize-1)
            if self.__dimension.getType(TempDim) is True:  # continuous
                if ins.getFeature(TempDim) < self.__Pop[ChosenNeg].getFeature(TempDim):
                    btemp = self.__ro.getUniformDouble(ins.getFeature(TempDim), self.__Pop[ChosenNeg].getFeature(TempDim))
                    if btemp < self.__region[TempDim][1]:
                        self.__region[TempDim][1] = btemp
                else:
                    btemp = self.__ro.getUniformDouble(self.__Pop[ChosenNeg].getFeature(TempDim), ins.getFeature(TempDim))
                    if btemp > self.__region[TempDim][0]:
                        self.__region[TempDim][0] = btemp
            else:
                ChosenDim.append(TempDim)
                NonChosenDim.remove(TempDim)
                self.__label[TempDim] = False

            count += 1

        while len(NonChosenDim) > self.__UncertainBits:
            TempDim = NonChosenDim[self.__ro.getUniformInteger(0, len(NonChosenDim) - 1)]
            ChosenDim.append(TempDim)
            NonChosenDim.remove(TempDim)
            self.__label[TempDim] = False

        return

    '''
    RACOS for mixed optimization
    param:
        func: objective function
        ss:   sample size
        mt:   maximum number of iterations (used as the evaluation budget in online mode)
        pn:   positive instance size
        rp:   the probability of sampling from the learned model
        ub:   uncertain bits
    '''
    def MixOpt(self, func, ss, mt, pn, rp, ub):
        self.Clear()
        self.setParameters(ss, mt, pn, rp, ub)
        self.ResetModel()
        self.Initialize(func)

        if self.__OnlineSwitch is False:

            for itera in range(self.__MaxIteration - 1):

                self.__NextPop = []

                # self.ShowPosPop(True)
                # self.ShowPop(True)

                for sam in range(self.__SampleSize):
                    while (True):
                        self.ResetModel()
                        ChosenPos = self.__ro.getUniformInteger(0, self.__PositiveNum - 1)
                        Gsample = self.__ro.getUniformDouble(0, 1)
                        if (Gsample <= self.__RandProbability):
                            # print 'begin shrinking!'
                            self.MixShrinkModel(self.__PosPop[ChosenPos])
                        ins = self.PosRandomMixInstance(self.__PosPop[ChosenPos], self.__dimension, self.__region,
                                                        self.__label)
                        if ((self.InstanceInList(ins, self.__PosPop, self.__PositiveNum) is False) and (
                                    self.InstanceInList(ins, self.__NextPop, sam) is False)):
                            ins.setFitness(func(ins.getFeatures()))
                            break
                    self.__NextPop.append(ins)
                self.__Pop = []
                for i in range(self.__SampleSize):
                    self.__Pop.append(self.__NextPop[i])

                self.UpdatePosPop()
                self.UpdateOptimal()
        else:
            #online style
            BudCount = self.__SampleSize + self.__PositiveNum
            while BudCount < self.__Budget:
                print BudCount, self.__Optimal.getFitness()
                BudCount += 1
                while (True):
                    self.ResetModel()
                    ChosenPos = self.__ro.getUniformInteger(0, self.__PositiveNum - 1)
                    Gsample = self.__ro.getUniformDouble(0, 1)
                    if (Gsample <= self.__RandProbability):
                        # print 'begin shrinking!'
                        self.MixShrinkModel(self.__PosPop[ChosenPos])
                    ins = self.PosRandomMixInstance(self.__PosPop[ChosenPos], self.__dimension, self.__region,
                                                    self.__label)
                    if ((self.InstanceInList(ins, self.__PosPop, self.__PositiveNum) is False) and (
                                self.InstanceInList(ins, self.__Pop, self.__SampleSize) is False)):
                        ins.setFitness(func(ins.getFeatures()))
                        break
                self.OnlineUpdate(ins)
                self.UpdateOptimal()

        return
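For reference, a minimal usage sketch of the class above. ToyDimension and sphere are hypothetical stand-ins introduced here only for illustration: ToyDimension implements just the three accessors the optimizer actually calls (getSize, getType, getRegion) and is not the project's real Components.Dimension, and the sketch assumes Instance and RandomOperator from the same source file are importable.

# Minimal usage sketch (illustrative only). ToyDimension is a hypothetical
# stand-in that implements just the accessors RacosOptimization calls;
# it is not the real Components.Dimension class.
class ToyDimension:
    def __init__(self, size, lower, upper):
        self.__size = size
        self.__regions = [[lower, upper] for _ in range(size)]

    def getSize(self):
        return self.__size

    def getType(self, index):
        return True  # True marks a continuous dimension in the code above

    def getRegion(self, index):
        return self.__regions[index]


def sphere(x):
    # toy objective with minimum 0 at the origin
    return sum(v * v for v in x)


dim = ToyDimension(10, -1.0, 1.0)
racos = RacosOptimization(dim)
# ss=20 samples per iteration, mt=100 iterations, pn=2 positive samples,
# rp=0.95 chance of sampling inside the learned model, ub=1 uncertain bit
racos.ContinueOpt(sphere, 20, 100, 2, 0.95, 1)
print(racos.getOptimal().getFitness())
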
from ObjectiveFunction import DistributedFunction
from Components import Dimension
from Tools import RandomOperator
# assumed SMAC3 (pre-1.0 layout) import for fmin_smac, which is used below
# but not imported in the original excerpt
from smac.facade.func_facade import fmin_smac
import numpy as np

dimension_size = 10

dimension = Dimension()
dimension.set_dimension_size(dimension_size)
dimension.set_regions([[-0.5, 0.5] for _ in range(dimension_size)],
                      [0 for _ in range(dimension_size)])

func = DistributedFunction(dimension, bias_region=[-0.5, 0.5])
target_bias = [0.25 for _ in range(dimension_size)]
func.setBias(target_bias)

ro = RandomOperator()
prob_fct = func.DisRosenbrock
x0 = [ro.get_uniform_double(-0.5, 0.5) for _ in range(dimension_size)]
ans = []
for i in range(10):
    x, cost, _ = fmin_smac(func=prob_fct,
                           x0=x0,
                           bounds=[[-0.5, 0.5] for _ in range(dimension_size)],
                           maxfun=50,
                           rng=3)
    ans.append(x)
# print("Optimum at {} with cost of {}".format(x, cost))
print(np.mean(ans))
print(np.std(ans))
import numpy as np
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
# Instance and RandomOperator are project-local classes; their import lines
# are not part of this excerpt.


class ExpAdaRacosOptimization:
    def __init__(self, dimension, expert):

        self.__pop = []  # population set
        self.__pos_pop = []  # positive sample set
        self.__optimal = None  # the best sample so far
        self.__region = []  # the region of model
        self.__label = []  # the random label, if true random in this dimension
        self.__sample_size = 0  # the instance size of sampling in an iteration
        self.__budget = 0  # budget of evaluation
        self.__positive_num = 0  # positive sample set size
        self.__rand_probability = 0.0  # the probability of sampling in model
        self.__uncertain_bit = 0  # the dimension size of sampling randomly
        self.__dimension = dimension

        # sampling saving
        self.__model_ins = []  # positive sample used to modeling
        self.__negative_set = []  # negative set used to modeling
        self.__new_ins = []  # new sample
        self.__sample_label = []  # the label of each sample
        self.__sample_count = 0
        self.__predictors = []  # predictor list used by get_predictors(), predict() and delete_predictor()

        self.__expert = expert
        self.__adv_threshold = 0

        self.__sample_results = []

        self.__optimal_update_count = 0

        for i in range(self.__dimension.get_size()):
            region = [0.0, 0.0]
            self.__region.append(region)
            self.__label.append(0)

        self.__ro = RandomOperator()
        return

    # --------------------------------------------------
    # debugging
    # print positive set
    def show_pos_pop(self):
        print('positive set:------------------')
        for i in range(self.__positive_num):
            self.__pos_pop[i].show_instance()
        print('-------------------------------')
        return

    # print negative set
    def show_pop(self):
        print('negative set:------------------')
        for i in range(self.__sample_size):
            self.__pop[i].show_instance()
        print('-------------------------------')
        return

    # print optimal
    def show_optimal(self):
        print('optimal:-----------------------')
        self.__optimal.show_instance()
        print('-------------------------------')

    # print region
    def show_region(self):
        print('region:------------------------')
        for i in range(self.__dimension.get_size()):
            print('dimension ', i, ' [', self.__region[i][0], ',',
                  self.__region[i][1], ']')
        print('-------------------------------')
        return

    # print label
    def show_label(self):
        print('label:-------------------------')
        print(self.__label)
        print('-------------------------------')

    # --------------------------------------------------

    def get_predictors(self):
        return self.__predictors

    # clear sampling log
    def log_clear(self):
        self.__model_ins = []
        self.__negative_set = []
        self.__new_ins = []
        self.__sample_label = []
        return

    # generate the environment that should be logged
    # returns (best sample features, negative-sample trajectory, new sample features)
    def generate_environment(self, ins, new_ins):

        trajectory = []
        if True:
            array_best = []
            for i in range(len(ins.get_features())):
                array_best.append(ins.get_features()[i])
            new_sam = []
            for i in range(len(new_ins.get_features())):
                new_sam.append(new_ins.get_features()[i])
            sorted_neg = sorted(self.__pop,
                                key=lambda instance: instance.get_fitness())
            for i in range(self.__sample_size):
                trajectory.append(sorted_neg[i].get_features())

        return array_best, trajectory, new_sam

    # clear parameters of saving
    def clear(self):
        self.__pop = []
        self.__pos_pop = []
        self.__optimal = None
        self.__expert.reset_weight()
        self.__sample_count = 0
        return

    # parameters setting
    def set_parameters(self, ss=0, bud=0, pn=0, rp=0.0, ub=0, at=0):
        self.__sample_size = ss
        self.__budget = bud
        self.__positive_num = pn
        self.__rand_probability = rp
        self.__uncertain_bit = ub
        self.__adv_threshold = at
        return

    # get optimal
    def get_optimal(self):
        return self.__optimal

    # generate an instance randomly
    def random_instance(self, dim, region, label):
        ins = Instance(dim)
        for i in range(dim.get_size()):
            if label[i] is True:
                if dim.get_type(i) == 0:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_double(region[i][0],
                                                     region[i][1]))
                else:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(region[i][0],
                                                      region[i][1]))
        return ins

    # generate an instance based on a positive sample
    def pos_random_instance(self, dim, region, label, pos_instance):
        ins = Instance(dim)
        for i in range(dim.get_size()):
            if label[i] is False:
                if dim.get_type(i) == 0:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_double(region[i][0],
                                                     region[i][1]))
                else:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(region[i][0],
                                                      region[i][1]))
            else:
                ins.set_feature(i, pos_instance.get_feature(i))
        return ins

    # reset model
    def reset_model(self):
        for i in range(self.__dimension.get_size()):
            self.__region[i][0] = self.__dimension.get_region(i)[0]
            self.__region[i][1] = self.__dimension.get_region(i)[1]
            self.__label[i] = True
        return

    # if an instance exists in a list, return true
    def instance_in_list(self, ins, this_list, end):
        for i in range(len(this_list)):
            if i == end:
                break
            if ins.equal(this_list[i]) is True:
                return True
        return False

    # initialize pop, pos_pop, optimal
    def initialize(self, func):

        temp = []

        self.reset_model()

        # sample in original region under uniform distribution
        for i in range(self.__sample_size + self.__positive_num):
            while True:
                ins = self.random_instance(self.__dimension, self.__region,
                                           self.__label)
                if self.instance_in_list(ins, temp, i) is False:
                    break
            ins.set_fitness(func(ins.get_features()))
            self.__sample_results.append(ins.get_fitness())
            temp.append(ins)

            # sorted by fitness
            temp.sort(key=lambda instance: instance.get_fitness())

        # initialize pos_pop
        for i in range(self.__positive_num):
            self.__pos_pop.append(temp[i])

        # initialize pop
        for i in range(self.__sample_size):
            self.__pop.append(temp[self.__positive_num + i])

        # initialize optimal
        self.__optimal = self.__pos_pop[0].copy_instance()

        return

    # distinguish function for mixed optimization
    def distinguish(self, exa, neg_set):
        for i in range(self.__sample_size):
            j = 0
            while j < self.__dimension.get_size():
                if self.__dimension.get_type(
                        j) == 0 or self.__dimension.get_type(j) == 1:
                    if neg_set[i].get_feature(
                            j) < self.__region[j][0] or neg_set[i].get_feature(
                                j) > self.__region[j][1]:
                        break
                else:
                    if self.__label[j] is False and exa.get_feature(
                            j) != neg_set[i].get_feature(j):
                        break
                j += 1
            if j >= self.__dimension.get_size():
                return False
        return True

    # update positive set and negative set using a new sample by online strategy
    def online_update(self, ins):

        # update positive set
        j = 0
        while j < self.__positive_num:
            if ins.get_fitness() < self.__pos_pop[j].get_fitness():
                break
            else:
                j += 1

        if j < self.__positive_num:
            temp = ins
            ins = self.__pos_pop[self.__positive_num - 1]
            k = self.__positive_num - 1
            while k > j:
                self.__pos_pop[k] = self.__pos_pop[k - 1]
                k -= 1
            self.__pos_pop[j] = temp

        # update negative set
        j = 0
        while j < self.__sample_size:
            if ins.get_fitness() < self.__pop[j].get_fitness():
                break
            else:
                j += 1
        if j < self.__sample_size:
            temp = ins
            ins = self.__pop[self.__sample_size - 1]
            k = self.__sample_size - 1
            while k > j:
                self.__pop[k] = self.__pop[k - 1]
                k -= 1
            self.__pop[j] = temp

        return

    # update optimal
    def update_optimal(self):
        if self.__pos_pop[0].get_fitness() < self.__optimal.get_fitness():
            self.__optimal_update_count += 1
            self.__optimal = self.__pos_pop[0].copy_instance()
        return

    # generate instance randomly based on positive sample for mixed optimization
    def pos_random_mix_isntance(self, exa, region, label):
        ins = Instance(self.__dimension)
        for i in range(self.__dimension.get_size()):
            if label[i] is False:
                ins.set_feature(i, exa.get_feature(i))
            else:
                # float random
                if self.__dimension.get_type(i) == 0:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_double(region[i][0],
                                                     region[i][1]))
                # integer random
                elif self.__dimension.get_type(i) == 1:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(region[i][0],
                                                      region[i][1]))
                # categorical random
                else:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(
                            self.__dimension.get_region(i)[0],
                            self.__dimension.get_region(i)[1]))
        return ins

    # generate model based on mixed optimization for next sampling
    # label[i] = False means this dimension is fixed to the positive sample's value
    def shrink_model(self, exa, neg_set):

        dist_count = 0

        chosen_dim = []
        non_chosen_dim = [i for i in range(self.__dimension.get_size())]

        remain_neg = [i for i in range(self.__sample_size)]

        while len(remain_neg) != 0:

            dist_count += 1

            temp_dim = non_chosen_dim[self.__ro.get_uniform_integer(
                0,
                len(non_chosen_dim) - 1)]
            chosen_neg = self.__ro.get_uniform_integer(0, len(remain_neg) - 1)
            # float dimension shrink
            if self.__dimension.get_type(temp_dim) == 0:
                if exa.get_feature(temp_dim) < neg_set[
                        remain_neg[chosen_neg]].get_feature(temp_dim):
                    temp_v = self.__ro.get_uniform_double(
                        exa.get_feature(temp_dim),
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim))
                    if temp_v < self.__region[temp_dim][1]:
                        self.__region[temp_dim][1] = temp_v
                else:
                    temp_v = self.__ro.get_uniform_double(
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim),
                        exa.get_feature(temp_dim))
                    if temp_v > self.__region[temp_dim][0]:
                        self.__region[temp_dim][0] = temp_v
                r_i = 0
                while r_i < len(remain_neg):
                    if neg_set[remain_neg[r_i]].get_feature(temp_dim) < self.__region[temp_dim][0] or \
                            neg_set[remain_neg[r_i]].get_feature(temp_dim) > self.__region[temp_dim][1]:
                        remain_neg.remove(remain_neg[r_i])
                    else:
                        r_i += 1
            # integer dimension shrink
            elif self.__dimension.get_type(temp_dim) == 1:
                if exa.get_feature(temp_dim) < neg_set[
                        remain_neg[chosen_neg]].get_feature(temp_dim):
                    temp_v = self.__ro.get_uniform_integer(
                        exa.get_feature(temp_dim),
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim) -
                        1)
                    if temp_v < self.__region[temp_dim][1]:
                        self.__region[temp_dim][1] = temp_v
                else:
                    temp_v = self.__ro.get_uniform_integer(
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim) -
                        1, exa.get_feature(temp_dim))
                    if temp_v > self.__region[temp_dim][0]:
                        self.__region[temp_dim][0] = temp_v
                if self.__region[temp_dim][0] == self.__region[temp_dim][1]:
                    chosen_dim.append(temp_dim)
                    non_chosen_dim.remove(temp_dim)
                    self.__label[temp_dim] = False
                r_i = 0
                while r_i < len(remain_neg):
                    if neg_set[remain_neg[r_i]].get_feature(temp_dim) < self.__region[temp_dim][0] or \
                            neg_set[remain_neg[r_i]].get_feature(temp_dim) > self.__region[temp_dim][1]:
                        remain_neg.remove(remain_neg[r_i])
                    else:
                        r_i += 1
            # categorical
            else:
                chosen_dim.append(temp_dim)
                non_chosen_dim.remove(temp_dim)
                self.__label[temp_dim] = False
                r_i = 0
                while r_i < len(remain_neg):
                    if neg_set[remain_neg[r_i]].get_feature(
                            temp_dim) != exa.get_feature(temp_dim):
                        remain_neg.remove(remain_neg[r_i])
                    else:
                        r_i += 1

        while len(non_chosen_dim) > self.__uncertain_bit:
            temp_dim = non_chosen_dim[self.__ro.get_uniform_integer(
                0,
                len(non_chosen_dim) - 1)]
            chosen_dim.append(temp_dim)
            non_chosen_dim.remove(temp_dim)
            self.__label[temp_dim] = False

        return dist_count

    def generate_inputs(self, ins, new_ins):
        trajectory = []
        if True:
            array_best = []
            for i in range(len(ins.get_features())):
                array_best.append(ins.get_features()[i])
            new_sam = []
            for i in range(len(new_ins.get_features())):
                new_sam.append(new_ins.get_features()[i])
            sorted_neg = sorted(self.__pop,
                                key=lambda instance: instance.get_fitness())
            for i in range(self.__sample_size):
                trajectory.append((np.array(sorted_neg[i].get_features()) -
                                   np.array(array_best)).tolist())
            trajectory.append(new_sam)
            trajectory = [trajectory]
        return trajectory

    # inputs is 4D list
    def predict(self, inputs):

        # print 'in prediction-----'
        inputs = torch.from_numpy(np.array(inputs)).float()
        inputs = Variable(inputs.cuda())

        # print 'input: ', inputs
        # inputs = Variable(inputs)

        outputs = []
        for i in range(len(self.__predictors)):
            # print i, ' predictor---'
            output = self.__predictors[i].predictor(inputs)
            # print 'out 1: ', output
            output = output.data.cpu().numpy()
            # print 'out numpy: ', output
            output = output.reshape(output.size).tolist()
            # print 'out list: ', output
            outputs.append(output)
        # outputs = np.array(outputs)
        # print 'all out: ', outputs
        # outputs = np.mean(outputs, axis=0).tolist()
        # print 'mean out: ', outputs
        # print '-------------------------------'
        return outputs

    def delete_predictor(self, prob_matrix, truth_index, truth_label):

        label_threshold = 0.5
        del_index = []
        # delete all predictors which make mistakes
        if True:
            for i in range(len(prob_matrix)):
                if prob_matrix[i][truth_index] > label_threshold:
                    this_label = 1
                else:
                    this_label = 0
                if this_label != truth_label:
                    del_index.append(i)

        # delete the predictors which can't find the positive
        if False:
            if truth_label == 1:
                for i in range(len(prob_matrix)):
                    if prob_matrix[i][truth_index] > label_threshold:
                        this_label = 1
                    else:
                        this_label = 0
                    if this_label != truth_label:
                        del_index.append(i)

        # delete predictors
        new_predictors = []
        for i in range(len(self.__predictors)):
            if i not in del_index:
                new_predictors.append(self.__predictors[i])
        self.__predictors = new_predictors

        return

    # sequential Racos for mixed optimization
    # the dimension type includes float, integer and categorical
    def exp_ada_mix_opt(self,
                        obj_fct=None,
                        ss=2,
                        bud=20,
                        pn=1,
                        rp=0.95,
                        ub=1,
                        at=5,
                        step=1,
                        plot=False):

        sample_count = 0
        all_dist_count = 0
        log_buffer = []
        # initialize sample set
        self.clear()
        self.log_clear()
        self.set_parameters(ss=ss, bud=bud, pn=pn, rp=rp, ub=ub, at=at)
        self.reset_model()
        self.initialize(obj_fct)

        # ------------------------------------------------------
        # print 'after initialization------------'
        # self.show_pos_pop()
        # self.show_pop()
        # ------------------------------------------------------

        # optimization
        budget_c = self.__sample_size + self.__positive_num
        while budget_c < self.__budget:
            budget_c += 1
            if budget_c % 10 == 0:
                # print '======================================================'
                print('budget ', budget_c, ':', self.__optimal.get_fitness())
                log_buffer.append('budget ' + str(budget_c) + ':' +
                                  str(self.__optimal.get_fitness()))
                print(self.weights)
                log_buffer.append(str(self.weights))
                if plot:
                    if False:  # plot two color
                        index = [
                            i * 2 + 1
                            for i in range(int(len(self.weights) / 2))
                        ]
                        plt.scatter(range(len(self.weights[index])),
                                    self.weights[index],
                                    c='red')
                        plt.scatter(
                            range(
                                len(self.weights[[
                                    i * 2
                                    for i in range(int(len(self.weights) / 2))
                                ]])),
                            self.weights[[
                                i * 2
                                for i in range(int(len(self.weights) / 2))
                            ]],
                            c='blue')
                    else:

                        plt.scatter(range(len(self.weights)), self.weights)
                        plt.show()
                # self.__optimal.show_instance()
            adv_samples = []
            adv_inputs = []
            for adv_i in range(self.__adv_threshold):
                while True:
                    self.reset_model()
                    chosen_pos = self.__ro.get_uniform_integer(
                        0, self.__positive_num - 1)
                    model_sample = self.__ro.get_uniform_double(0.0, 1.0)
                    if model_sample <= self.__rand_probability:
                        dc = self.shrink_model(self.__pos_pop[chosen_pos],
                                               self.__pop)
                        all_dist_count += dc

                    # -----------------------------------------------------------
                    # self.show_region()
                    # self.show_label()
                    # -----------------------------------------------------------

                    ins = self.pos_random_mix_isntance(
                        self.__pos_pop[chosen_pos], self.__region,
                        self.__label)

                    sample_count += 1

                    if (self.instance_in_list(ins, self.__pos_pop,
                                              self.__positive_num) is
                            False) and (self.instance_in_list(
                                ins, self.__pop, self.__sample_size) is False):
                        # ins.set_fitness(obj_fct(ins.get_features()))
                        # ------------------------------------------------------
                        # print 'new sample:-------------------'
                        # ins.show_instance()
                        # print '------------------------------'
                        # ------------------------------------------------------

                        break
                this_input = self.generate_inputs(self.__pos_pop[chosen_pos],
                                                  ins)
                adv_inputs.append(this_input)
                adv_samples.append(ins)

            # print 'inputs: ', len(adv_inputs), '*', len(adv_inputs[0]), '*', len(adv_inputs[0][0]), '*', len(adv_inputs[0][0][0])
            # get probability matrix, each line means a result list for a predictor
            probs, prob_matrix = self.__expert.predict(adv_inputs)
            # print '------------------------------------------'
            # print 'probs: ', probs
            max_index = probs.index(max(probs))
            # print 'max index: ', max_index
            good_sample = adv_samples[max_index]
            good_sample.set_fitness(obj_fct(good_sample.get_features()))

            if good_sample.get_fitness() < self.__optimal.get_fitness():
                truth_label = 1
            else:
                truth_label = 0

            self.weights = self.__expert.update_weights(
                np.array(prob_matrix)[:, max_index].T, truth_label)

            self.online_update(good_sample)

            # ------------------------------------------------------
            # print 'after updating------------'
            # self.show_pos_pop()
            # self.show_pop()
            # ------------------------------------------------------

            self.update_optimal()
        # print 'average sample times of each sample:', float(sample_count) / self.__budget
        # print 'average shrink times of each sample:', float(all_dist_count) / sample_count

        return log_buffer
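And a comparable sketch for ExpAdaRacosOptimization. RandomExpert and ToyDimension below are hypothetical stubs written only to satisfy the calls the optimizer makes (reset_weight, predict, update_weights and get_size, get_type, get_region); they are not the project's real expert or Dimension classes, and the Instance/RandomOperator dependencies are again assumed to come from the source file.

import numpy as np

# Hypothetical stand-ins (illustrative only); they mirror the calls made by
# ExpAdaRacosOptimization above and are not the project's real classes.
class ToyDimension:
    def __init__(self, size, lower, upper):
        self.__size = size
        self.__regions = [[lower, upper] for _ in range(size)]

    def get_size(self):
        return self.__size

    def get_type(self, index):
        return 0  # 0 marks a float dimension in the code above

    def get_region(self, index):
        return self.__regions[index]


class RandomExpert:
    # predict() must return (probs, prob_matrix); update_weights() must
    # return the new weight vector; reset_weight() clears the state.
    def __init__(self, predictor_num=4):
        self.__num = predictor_num
        self.__weights = np.ones(predictor_num) / predictor_num

    def reset_weight(self):
        self.__weights = np.ones(self.__num) / self.__num

    def predict(self, adv_inputs):
        prob_matrix = np.random.rand(self.__num, len(adv_inputs))
        probs = prob_matrix.mean(axis=0).tolist()
        return probs, prob_matrix.tolist()

    def update_weights(self, predictions, truth_label):
        # trivial multiplicative update, just to keep the loop running
        self.__weights *= np.where(
            (np.array(predictions) > 0.5) == bool(truth_label), 1.0, 0.9)
        self.__weights /= self.__weights.sum()
        return self.__weights


def sphere(x):
    return sum(v * v for v in x)


dim = ToyDimension(10, -1.0, 1.0)
opt = ExpAdaRacosOptimization(dim, RandomExpert())
log = opt.exp_ada_mix_opt(obj_fct=sphere, ss=5, bud=100, pn=2,
                          rp=0.95, ub=1, at=5)
print(opt.get_optimal().get_fitness())
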
Example #9
class RacosOptimization:
    def __init__(self, dim):

        self.__Pop = []  # population set
        self.__PosPop = []  # positive sample set
        self.__Optimal = []  # the best sample so far
        self.__NextPop = []  # the next population set
        self.__region = []  # the region of model
        self.__label = []  # the random label, if true random in this dimension
        self.__SampleSize = 0  # the instance number of sampling in an iteration
        self.__MaxIteration = 0  # the number of iterations
        self.__Budget = 0  # budget of evaluation
        self.__PositiveNum = 0  # the set size of PosPop
        self.__RandProbability = 0  # the probability of sample in model
        self.__UncertainBits = 0  # the dimension size that is sampled randomly
        self.__OnlineSwitch = False
        self.__dimension = dim

        for i in range(dim.getSize()):
            reg = []
            reg.append(0)
            reg.append(0)
            self.__region.append(reg)
            self.__label.append(True)

        self.__ro = RandomOperator()
        return

    def OnlineTurnOn(self):
        self.__OnlineSwitch = True

    def OnlineTurnOff(self):
        self.__OnlineSwitch = False

    def Clear(self):
        self.__Pop = []
        self.__PosPop = []
        self.__Optimal = []
        self.__NextPop = []
        return

    # Parameters setting
    def setParameters(self, ss, mt, pn, rp, ub):
        self.__SampleSize = ss
        if self.__OnlineSwitch is False:
            self.__MaxIteration = mt
        else:
            self.__Budget = mt
        self.__PositiveNum = pn
        self.__RandProbability = rp
        self.__UncertainBits = ub
        return

    # -------------------------------------------------------
    # some test function
    def ShowPop(self, fea):
        print '----Pop----'
        for i in range(self.__SampleSize):
            if fea is True:
                print self.__Pop[i].getFeatures(
                ), ':', self.__Pop[i].getFitness()
            else:
                print 'fitness:', self.__Pop[i].getFitness()
        return

    def ShowNextPop(self, fea):
        print '----NextPop----'
        for i in range(self.__SampleSize):
            if fea is True:
                print self.__NextPop[i].getFeatures(
                ), ':', self.__NextPop[i].getFitness()
            else:
                print 'fitness:', self.__NextPop[i].getFitness()
        return

    def ShowPosPop(self, fea):
        print '----PosPop----'
        for i in range(self.__PositiveNum):
            if fea is True:
                print self.__PosPop[i].getFeatures(
                ), ':', self.__PosPop[i].getFitness()
            else:
                print 'fitness:', self.__PosPop[i].getFitness()
        return

    def ShowRegion(self):
        print '----Region----'
        for i in range(self.__dimension.getSize()):
            print 'dimension', i, '[', self.__region[i][0], ',', self.__region[
                i][1], ']'
        return

    def ShowLabel(self):
        print self.__label
        return

    # test function end
    # ----------------------------------------------------------------

    # Return optimal
    def getOptimal(self):
        return self.__Optimal

    # Generate an instance randomly
    def RandomInstance(self, dim, region, label):
        inst = Instance(dim)
        for i in range(dim.getSize()):
            if label[i] is True:
                if dim.getType(i) is True:
                    inst.setFeature(
                        i,
                        self.__ro.getUniformDouble(region[i][0], region[i][1]))
                else:
                    inst.setFeature(
                        i,
                        self.__ro.getUniformInteger(region[i][0],
                                                    region[i][1]))
        return inst

    # Generate an instance randomly based on a positive sample
    def PosRandomInstance(self, dim, region, label, pos):
        ins = Instance(dim)
        for i in range(dim.getSize()):
            if label[i] is False:
                if dim.getType(i) is True:
                    ins.setFeature(
                        i,
                        self.__ro.getUniformDouble(region[i][0], region[i][1]))
                else:
                    ins.setFeature(
                        i,
                        self.__ro.getUniformInteger(region[i][0],
                                                    region[i][1]))
            else:
                ins.setFeature(i, pos.getFeature(i))
        return ins

    # reset model
    def ResetModel(self):
        for i in range(self.__dimension.getSize()):
            self.__region[i][0] = self.__dimension.getRegion(i)[0]
            self.__region[i][1] = self.__dimension.getRegion(i)[1]
            self.__label[i] = True
        return

    # If an instance equal to ins already exists in the list, return True
    def InstanceInList(self, ins, ins_list, end):
        for i in range(len(ins_list)):
            if i == end:
                break
            if ins.Equal(ins_list[i]) is True:
                return True
        return False

    # Initialize Pop, PosPop and Optimal
    def Initialize(self, func):
        temp = []

        # sample in original region under uniform distribution
        self.ResetModel()

        for i in range(self.__SampleSize + self.__PositiveNum):
            ins = []
            while (True):
                ins = self.RandomInstance(self.__dimension, self.__region,
                                          self.__label)
                if self.InstanceInList(ins, temp, i) is False:
                    break
            ins.setFitness(func(ins.getFeatures()))
            temp.append(ins)
        # sorted by fitness
        temp.sort(key=lambda instance: instance.getFitness())
        # initialize PosPop and Pop
        i = 0
        while (i < self.__PositiveNum):
            self.__PosPop.append(temp[i])
            i += 1
        while (i < self.__PositiveNum + self.__SampleSize):
            self.__Pop.append(temp[i])
            i += 1
        # initialize optimal
        self.__Optimal = self.__PosPop[0].CopyInstance()
        return

    # Generate the model for sampling the next instance
    def ContinueShrinkModel(self, ins):
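        # Shrink the axis-aligned model until every negative sample in Pop lies
        # outside it: Pop[0:ins_left] holds the negatives still covered by the
        # region; each accepted cut on a randomly chosen dimension swaps the newly
        # excluded negatives to the tail of Pop and decreases ins_left until none remain.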
        ins_left = self.__SampleSize
        while (ins_left > 0):
            ChosenNeg = self.__ro.getUniformInteger(0, ins_left - 1)
            ChosenDim = self.__ro.getUniformInteger(
                0,
                self.__dimension.getSize() - 1)
            #shrinking
            if (ins.getFeature(ChosenDim) <
                    self.__Pop[ChosenNeg].getFeature(ChosenDim)):
                btemp = self.__ro.getUniformDouble(
                    ins.getFeature(ChosenDim),
                    self.__Pop[ChosenNeg].getFeature(ChosenDim))
                if (btemp < self.__region[ChosenDim][1]):
                    self.__region[ChosenDim][1] = btemp
                    i = 0
                    while (i < ins_left):
                        if self.__Pop[i].getFeature(ChosenDim) >= btemp:
                            ins_left = ins_left - 1
                            itemp = self.__Pop[i]
                            self.__Pop[i] = self.__Pop[ins_left]
                            self.__Pop[ins_left] = itemp
                        else:
                            i += 1
            else:
                btemp = self.__ro.getUniformDouble(
                    self.__Pop[ChosenNeg].getFeature(ChosenDim),
                    ins.getFeature(ChosenDim))
                if (btemp > self.__region[ChosenDim][0]):
                    self.__region[ChosenDim][0] = btemp
                    i = 0
                    while (i < ins_left):
                        if self.__Pop[i].getFeature(ChosenDim) <= btemp:
                            ins_left = ins_left - 1
                            itemp = self.__Pop[i]
                            self.__Pop[i] = self.__Pop[ins_left]
                            self.__Pop[ins_left] = itemp
                        else:
                            i += 1

        return

    # Set uncertain bits
    def setUncertainBits(self):
        temp = []
        for i in range(self.__dimension.getSize()):
            temp.append(i)
        for i in range(self.__UncertainBits):
            index = self.__ro.getUniformInteger(
                0,
                self.__dimension.getSize() - i - 1)
            self.__label[temp[index]] = False
            temp.remove(temp[index])
        return

    # Update the PosPop list with the newly generated Pop list
    def UpdatePosPop(self):
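        # Merge the new Pop into PosPop: each new sample that beats some positive
        # sample is inserted at its rank, and the displaced worst positive sample
        # is pushed back into Pop, so both sets keep their sizes.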
        for i in range(self.__SampleSize):
            j = 0
            while (j < self.__PositiveNum):
                if (self.__Pop[i].getFitness() <
                        self.__PosPop[j].getFitness()):
                    break
                else:
                    j += 1
            if (j < self.__PositiveNum):
                temp = self.__Pop[i]
                self.__Pop[i] = self.__PosPop[self.__PositiveNum - 1]
                k = self.__PositiveNum - 1
                while (k > j):
                    self.__PosPop[k] = self.__PosPop[k - 1]
                    k -= 1
                self.__PosPop[j] = temp
        return

    def OnlineUpdate(self, ins):
        j = 0
        while (j < self.__PositiveNum):
            if (ins.getFitness() < self.__PosPop[j].getFitness()):
                break
            else:
                j += 1
        if (j < self.__PositiveNum):
            temp = ins
            ins = self.__PosPop[self.__PositiveNum - 1]
            k = self.__PositiveNum - 1
            while (k > j):
                self.__PosPop[k] = self.__PosPop[k - 1]
                k -= 1
            self.__PosPop[j] = temp

        j = 0
        while (j < self.__SampleSize):
            if (ins.getFitness() < self.__Pop[j].getFitness()):
                break
            else:
                j += 1
        if (j < self.__SampleSize):
            temp = ins
            ins = self.__Pop[self.__SampleSize - 1]
            k = self.__SampleSize - 1
            while (k > j):
                self.__Pop[k] = self.__Pop[k - 1]
                k -= 1
            self.__Pop[j] = temp

    # Update Optimal
    def UpdateOptimal(self):
        if (self.__Optimal.getFitness() > self.__PosPop[0].getFitness()):
            self.__Optimal = self.__PosPop[0].CopyInstance()
        return

    # Return True if no instance in the Pop list lies inside the current model
    def Distinguish(self):
        for i in range(self.__SampleSize):
            j = 0
            while (j < self.__dimension.getSize()):
                if (self.__Pop[i].getFeature(j) > self.__region[j][0]) and (
                        self.__Pop[i].getFeature(j) < self.__region[j][1]):
                    j += 1
                else:
                    break
            if (j == self.__dimension.getSize()):
                return False
        return True

    '''
    Racos for continuous optimization (a usage sketch appears after this class)
    param:
        func: objective function name
        ss:   sample size
        mt:   maximum number of iterations (used as the evaluation budget in online mode)
        pn:   positive instance size
        rp:   the probability of sampling from the learned model
        ub:   uncertain bits
    '''

    def ContinueOpt(self, func, ss, mt, pn, rp, ub):

        self.Clear()
        self.setParameters(ss, mt, pn, rp, ub)
        self.ResetModel()
        self.Initialize(func)

        if self.__OnlineSwitch is False:
            # no online style
            for itera in range(self.__MaxIteration - 1):

                self.__NextPop = []
                for sam in range(self.__SampleSize):
                    while (True):
                        self.ResetModel()
                        ChosenPos = self.__ro.getUniformInteger(
                            0, self.__PositiveNum - 1)
                        Gsample = self.__ro.getUniformDouble(0, 1)
                        if (Gsample <= self.__RandProbability):
                            self.ContinueShrinkModel(self.__PosPop[ChosenPos])
                            self.setUncertainBits()
                        ins = self.PosRandomInstance(self.__dimension,
                                                     self.__region,
                                                     self.__label,
                                                     self.__PosPop[ChosenPos])
                        if ((self.InstanceInList(ins, self.__PosPop,
                                                 self.__PositiveNum) is False)
                                and (self.InstanceInList(
                                    ins, self.__NextPop, sam) is False)):
                            ins.setFitness(func(ins.getFeatures()))
                            break
                    self.__NextPop.append(ins)
                self.__Pop = []
                for i in range(self.__SampleSize):
                    self.__Pop.append(self.__NextPop[i])

                self.UpdatePosPop()
                self.UpdateOptimal()
        else:
            #Online style
            BudCount = self.__SampleSize + self.__PositiveNum
            while BudCount < self.__Budget:
                #        print BudCount, self.__Optimal.getFitness()
                BudCount += 1
                while (True):
                    self.ResetModel()
                    ChosenPos = self.__ro.getUniformInteger(
                        0, self.__PositiveNum - 1)
                    Gsample = self.__ro.getUniformDouble(0, 1)
                    if (Gsample <= self.__RandProbability):
                        self.ContinueShrinkModel(self.__PosPop[ChosenPos])
                        self.setUncertainBits()
                    ins = self.PosRandomInstance(self.__dimension,
                                                 self.__region, self.__label,
                                                 self.__PosPop[ChosenPos])
                    if ((self.InstanceInList(ins, self.__PosPop,
                                             self.__PositiveNum) is False)
                            and (self.InstanceInList(
                                ins, self.__Pop, self.__SampleSize) is False)):
                        ins.setFitness(func(ins.getFeatures()))
                        break

                self.OnlineUpdate(ins)
                self.UpdateOptimal()

        return

    # Distinguish function for discrete optimization
    def DiscreteDistinguish(self, ins, ChosenDim):

        if len(ChosenDim) == 0:
            return 0

        for i in range(self.__SampleSize):
            j = 0
            while j < len(ChosenDim):
                if ins.getFeature(ChosenDim[j]) != self.__Pop[i].getFeature(
                        ChosenDim[j]):
                    break
                j = j + 1
            if j == len(ChosenDim):
                return 0
        return 1

    # PosRandomInstance function for discrete optimization
    def PosRandomDiscreteInstance(self, PosIns, dim, label):
        ins = Instance(dim)

        for i in range(dim.getSize()):
            if label[i] is True:
                ins.setFeature(i, PosIns.getFeature(i))
            else:
                ins.setFeature(
                    i,
                    self.__ro.getUniformInteger(
                        dim.getRegion(i)[0],
                        dim.getRegion(i)[1]))
        return ins

    # ShrinkModel function for discrete optimization
    def DiscreteShrinkModel(self, ins, dim):
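        # Randomly fix dimensions (to be copied from the positive sample) until the
        # fixed dimensions alone distinguish it from every negative in Pop, then
        # keep fixing more until at most UncertainBits dimensions stay free; the
        # free dimensions, which will be sampled randomly, are returned.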

        NonChosenDim = []
        for i in range(dim.getSize()):
            NonChosenDim.append(i)
        ChosenDim = []

        while self.DiscreteDistinguish(ins, ChosenDim) == 0:
            tempDim = NonChosenDim[self.__ro.getUniformInteger(
                0,
                len(NonChosenDim) - 1)]
            ChosenDim.append(tempDim)
            NonChosenDim.remove(tempDim)

        while len(NonChosenDim) > self.__UncertainBits:
            tempDim = NonChosenDim[self.__ro.getUniformInteger(
                0,
                len(NonChosenDim) - 1)]
            ChosenDim.append(tempDim)
            NonChosenDim.remove(tempDim)

        return NonChosenDim

    '''
    RACOS for discrete optimization
    param:
        func: objective function name
        ss:   sample size
        mt:   maximum number of iterations (used as the evaluation budget in online mode)
        pn:   positive instance size
        rp:   the probability of sampling from the learned model
        ub:   uncertain bits
    '''

    def DiscreteOpt(self, func, ss, mt, pn, rp, ub):
        self.Clear()
        self.setParameters(ss, mt, pn, rp, ub)
        self.ResetModel()
        self.Initialize(func)

        if self.__OnlineSwitch is False:
            for itera in range(self.__MaxIteration - 1):

                self.__NextPop = []

                for sam in range(self.__SampleSize):
                    while (True):
                        self.ResetModel()
                        ChosenPos = self.__ro.getUniformInteger(
                            0, self.__PositiveNum - 1)
                        Gsample = self.__ro.getUniformDouble(0, 1)
                        if (Gsample <= self.__RandProbability):
                            NonChosenDim = self.DiscreteShrinkModel(
                                self.__PosPop[ChosenPos], self.__dimension)
                            for i in range(len(NonChosenDim)):
                                self.__label[NonChosenDim[i]] = False
                        ins = self.PosRandomDiscreteInstance(
                            self.__PosPop[ChosenPos], self.__dimension,
                            self.__label)
                        if ((self.InstanceInList(ins, self.__PosPop,
                                                 self.__PositiveNum) is False)
                                and (self.InstanceInList(
                                    ins, self.__NextPop, sam) is False)):
                            ins.setFitness(func(ins.getFeatures()))
                            break
                    self.__NextPop.append(ins)
                self.__Pop = []
                for i in range(self.__SampleSize):
                    self.__Pop.append(self.__NextPop[i])

                self.UpdatePosPop()
                self.UpdateOptimal()
        else:
            BudCount = self.__SampleSize + self.__PositiveNum
            while BudCount < self.__Budget:
                print BudCount, self.__Optimal.getFitness()
                BudCount += 1
                while (True):
                    self.ResetModel()
                    ChosenPos = self.__ro.getUniformInteger(
                        0, self.__PositiveNum - 1)
                    Gsample = self.__ro.getUniformDouble(0, 1)
                    if (Gsample <= self.__RandProbability):
                        NonChosenDim = self.DiscreteShrinkModel(
                            self.__PosPop[ChosenPos], self.__dimension)
                        for i in range(len(NonChosenDim)):
                            self.__label[NonChosenDim[i]] = False
                    ins = self.PosRandomDiscreteInstance(
                        self.__PosPop[ChosenPos], self.__dimension,
                        self.__label)
                    if ((self.InstanceInList(ins, self.__PosPop,
                                             self.__PositiveNum) is False)
                            and (self.InstanceInList(
                                ins, self.__Pop, self.__SampleSize) is False)):
                        ins.setFitness(func(ins.getFeatures()))
                        break

                self.OnlineUpdate(ins)
                self.UpdateOptimal()
        return

    # Distinguish function for mixed optimization
    def MixDistinguish(self, ins):
        for i in range(self.__SampleSize):
            j = 0
            while j < self.__dimension.getSize():
                if self.__dimension.getType(j) is True:
                    if self.__Pop[i].getFeature(j) < self.__region[j][
                            0] or self.__Pop[i].getFeature(
                                j) > self.__region[j][1]:
                        break
                else:
                    if self.__label[j] is False and ins.getFeature(
                            j) != self.__Pop[i].getFeature(j):
                        break
                j += 1
            if j >= self.__dimension.getSize():
                return False
        return True

    # PosRandomInstance for mixed optimization
    def PosRandomMixInstance(self, PosIns, dim, regi, lab):
        ins = Instance(dim)
        for i in range(dim.getSize()):
            if lab[i] is False:
                ins.setFeature(i, PosIns.getFeature(i))
            else:
                if dim.getType(i) is True:  # continuous
                    ins.setFeature(
                        i, self.__ro.getUniformDouble(regi[i][0], regi[i][1]))
                else:  # discrete
                    ins.setFeature(
                        i,
                        self.__ro.getUniformInteger(
                            dim.getRegion(i)[0],
                            dim.getRegion(i)[1]))
        return ins

    # ShrinkModel function for mixed optimization
    def MixShrinkModel(self, ins):
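        # Shrink until no negative in Pop is covered by the model: continuous
        # dimensions get their interval cut toward the positive sample, discrete
        # dimensions are fixed to the positive sample's value (label set False);
        # afterwards more free dimensions are fixed until at most UncertainBits remain.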

        ChosenDim = []
        NonChosenDim = []
        for i in range(self.__dimension.getSize()):
            NonChosenDim.append(i)

        count = 0
        while self.MixDistinguish(ins) is False:
            TempDim = NonChosenDim[self.__ro.getUniformInteger(
                0,
                len(NonChosenDim) - 1)]
            ChosenNeg = self.__ro.getUniformInteger(0, self.__SampleSize - 1)
            if self.__dimension.getType(TempDim) is True:  # continuous
                if ins.getFeature(TempDim) < self.__Pop[ChosenNeg].getFeature(
                        TempDim):
                    btemp = self.__ro.getUniformDouble(
                        ins.getFeature(TempDim),
                        self.__Pop[ChosenNeg].getFeature(TempDim))
                    if btemp < self.__region[TempDim][1]:
                        self.__region[TempDim][1] = btemp
                else:
                    btemp = self.__ro.getUniformDouble(
                        self.__Pop[ChosenNeg].getFeature(TempDim),
                        ins.getFeature(TempDim))
                    if btemp > self.__region[TempDim][0]:
                        self.__region[TempDim][0] = btemp
            else:
                ChosenDim.append(TempDim)
                NonChosenDim.remove(TempDim)
                self.__label[TempDim] = False

            count += 1

        while len(NonChosenDim) > self.__UncertainBits:
            TempDim = NonChosenDim[self.__ro.getUniformInteger(
                0,
                len(NonChosenDim) - 1)]
            ChosenDim.append(TempDim)
            NonChosenDim.remove(TempDim)
            self.__label[TempDim] = False

        return

    '''
    RACOS for mixed optimization
    param:
        func: objective function name
        ss:   sample size
        mt:   maximum number of iterations (used as the evaluation budget in online mode)
        pn:   positive instance size
        rp:   the probability of sampling from the learned model
        ub:   uncertain bits
    '''

    def MixOpt(self, func, ss, mt, pn, rp, ub):
        self.Clear()
        self.setParameters(ss, mt, pn, rp, ub)
        self.ResetModel()
        self.Initialize(func)

        if self.__OnlineSwitch is False:

            for itera in range(self.__MaxIteration - 1):

                self.__NextPop = []

                # self.ShowPosPop(True)
                # self.ShowPop(True)

                for sam in range(self.__SampleSize):
                    while (True):
                        self.ResetModel()
                        ChosenPos = self.__ro.getUniformInteger(
                            0, self.__PositiveNum - 1)
                        Gsample = self.__ro.getUniformDouble(0, 1)
                        if (Gsample <= self.__RandProbability):
                            # print 'begin shrinking!'
                            self.MixShrinkModel(self.__PosPop[ChosenPos])
                        ins = self.PosRandomMixInstance(
                            self.__PosPop[ChosenPos], self.__dimension,
                            self.__region, self.__label)
                        if ((self.InstanceInList(ins, self.__PosPop,
                                                 self.__PositiveNum) is False)
                                and (self.InstanceInList(
                                    ins, self.__NextPop, sam) is False)):
                            ins.setFitness(func(ins.getFeatures()))
                            break
                    self.__NextPop.append(ins)
                self.__Pop = []
                for i in range(self.__SampleSize):
                    self.__Pop.append(self.__NextPop[i])

                self.UpdatePosPop()
                self.UpdateOptimal()
        else:
            #online style
            BudCount = self.__SampleSize + self.__PositiveNum
            while BudCount < self.__Budget:
                print BudCount, self.__Optimal.getFitness()
                BudCount += 1
                while (True):
                    self.ResetModel()
                    ChosenPos = self.__ro.getUniformInteger(
                        0, self.__PositiveNum - 1)
                    Gsample = self.__ro.getUniformDouble(0, 1)
                    if (Gsample <= self.__RandProbability):
                        # print 'begin shrinking!'
                        self.MixShrinkModel(self.__PosPop[ChosenPos])
                    ins = self.PosRandomMixInstance(self.__PosPop[ChosenPos],
                                                    self.__dimension,
                                                    self.__region,
                                                    self.__label)
                    if ((self.InstanceInList(ins, self.__PosPop,
                                             self.__PositiveNum) is False)
                            and (self.InstanceInList(
                                ins, self.__Pop, self.__SampleSize) is False)):
                        ins.setFitness(func(ins.getFeatures()))
                        break
                self.OnlineUpdate(ins)
                self.UpdateOptimal()

        return
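
A minimal usage sketch for this class, assuming Instance and RandomOperator from the same project are importable; the SimpleDimension class below is a hypothetical stand-in that exposes only the getSize/getRegion/getType accessors this optimizer actually calls.

def sphere(x):
    # toy objective: RACOS treats smaller fitness as better
    return sum(v * v for v in x)


class SimpleDimension:  # hypothetical stand-in, only for this sketch
    def __init__(self, size, lower, upper):
        self.__size = size
        self.__regions = [[lower, upper] for _ in range(size)]

    def getSize(self):
        return self.__size

    def getRegion(self, index):
        return self.__regions[index]

    def getType(self, index):
        return True  # True marks a continuous dimension in this snippet


dim = SimpleDimension(10, -1.0, 1.0)
racos = RacosOptimization(dim)
# sample size 5, 100 iterations, 1 positive sample, model probability 0.95, 1 uncertain bit
racos.ContinueOpt(sphere, 5, 100, 1, 0.95, 1)
print(racos.getOptimal().getFitness())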
Example #10
0
class RacosOptimization:
    def __init__(self, dimension):

        self.__pop = []  # population set
        self.__pos_pop = []  # positive sample set
        self.__optimal = None  # the best sample so far
        self.__region = []  # the region of model
        self.__label = []  # the random label, if true random in this dimension
        self.__sample_size = 0  # the instance size of sampling in an iteration
        self.__budget = 0  # budget of evaluation
        self.__positive_num = 0  # positive sample set size
        self.__rand_probability = 0.0  # the probability of sampling in model
        self.__uncertain_bit = 0  # the dimension size of sampling randomly
        self.__dimension = dimension

        # sampling saving
        self.__model_ins = []  # positive sample used to modeling
        self.__negative_set = []  # negative set used to modeling
        self.__new_ins = []  # new sample
        self.__sample_label = []  # the label of each sample

        for i in range(self.__dimension.get_size()):
            region = [0.0, 0.0]
            self.__region.append(region)
            self.__label.append(0)

        self.__ro = RandomOperator()
        return

    # --------------------------------------------------
    # debugging
    # print positive set
    def show_pos_pop(self):
        print('positive set:------------------')
        for i in range(self.__positive_num):
            self.__pos_pop[i].show_instance()
        print('-------------------------------')
        return

    # print negative set
    def show_pop(self):
        print('negative set:------------------')
        for i in range(self.__sample_size):
            self.__pop[i].show_instance()
        print('-------------------------------')
        return

    # print optimal
    def show_optimal(self):
        print('optimal:-----------------------')
        self.__optimal.show_instance()
        print('-------------------------------')

    # print region
    def show_region(self):
        print('region:------------------------')
        for i in range(self.__dimension.get_size()):
            print('dimension ', i, ' [', self.__region[i][0], ',',
                  self.__region[i][1], ']')
        print('-------------------------------')
        return

    # print label
    def show_label(self):
        print('label:-------------------------')
        print(self.__label)
        print('-------------------------------')

    # --------------------------------------------------

    # clear sampling log
    def log_clear(self):
        self.__model_ins = []
        self.__negative_set = []
        self.__new_ins = []
        self.__sample_label = []
        return

    # get log
    def get_log(self):
        return self.__model_ins, self.__negative_set, self.__new_ins, self.__sample_label

    # generate the environment that should be logged
    # returns the positive sample, the sorted negative set (a 2-d list) and the new sample
    def generate_environment(self, ins, new_ins):

        array_best = list(ins.get_features())
        new_sam = list(new_ins.get_features())
        trajectory = []
        sorted_neg = sorted(self.__pop,
                            key=lambda instance: instance.get_fitness())
        for i in range(self.__sample_size):
            trajectory.append(sorted_neg[i].get_features())

        return array_best, trajectory, new_sam

    # clear the population, the positive set and the optimal
    def clear(self):
        self.__pop = []
        self.__pos_pop = []
        self.__optimal = None
        return

    # parameters setting
    def set_parameters(self, ss=0, bud=0, pn=0, rp=0.0, ub=0):
        self.__sample_size = ss
        self.__budget = bud
        self.__positive_num = pn
        self.__rand_probability = rp
        self.__uncertain_bit = ub
        return

    # get optimal
    def get_optimal(self):
        return self.__optimal

    # generate an instance randomly
    def random_instance(self, dim, region, label):
        ins = Instance(dim)
        for i in range(dim.get_size()):
            if label[i] is True:
                if dim.get_type(i) == 0:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_double(region[i][0],
                                                     region[i][1]))
                else:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(region[i][0],
                                                      region[i][1]))
        return ins

    # generate an instance based on a positive sample
    def pos_random_instance(self, dim, region, label, pos_instance):
        ins = Instance(dim)
        for i in range(dim.get_size()):
            if label[i] is False:
                if dim.get_type(i) == 0:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_double(region[i][0],
                                                     region[i][1]))
                else:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(region[i][0],
                                                      region[i][1]))
            else:
                ins.set_feature(i, pos_instance.get_feature(i))
        return ins

    # reset model
    def reset_model(self):
        for i in range(self.__dimension.get_size()):
            self.__region[i][0] = self.__dimension.get_region(i)[0]
            self.__region[i][1] = self.__dimension.get_region(i)[1]
            self.__label[i] = True
        return

    # if an instance exists in a list, return true
    def instance_in_list(self, ins, this_list, end):
        for i in range(len(this_list)):
            if i == end:
                break
            if ins.equal(this_list[i]) is True:
                return True
        return False

    # initialize pop, pos_pop, optimal
    def initialize(self, func):

        temp = []

        self.reset_model()

        # sample in original region under uniform distribution
        for i in range(self.__sample_size + self.__positive_num):
            while True:
                ins = self.random_instance(self.__dimension, self.__region,
                                           self.__label)
                if self.instance_in_list(ins, temp, i) is False:
                    break
            ins.set_fitness(func(ins.get_features()))
            temp.append(ins)

        # sorted by fitness
        temp.sort(key=lambda instance: instance.get_fitness())

        # initialize pos_pop
        for i in range(self.__positive_num):
            self.__pos_pop.append(temp[i])

        # initialize pop
        for i in range(self.__sample_size):
            self.__pop.append(temp[self.__positive_num + i])

        # initialize optimal
        self.__optimal = self.__pos_pop[0].copy_instance()

        return

    # distinguish function for mixed optimization
    def distinguish(self, exa, neg_set):
        for i in range(self.__sample_size):
            j = 0
            while j < self.__dimension.get_size():
                if self.__dimension.get_type(
                        j) == 0 or self.__dimension.get_type(j) == 1:
                    if neg_set[i].get_feature(
                            j) < self.__region[j][0] or neg_set[i].get_feature(
                                j) > self.__region[j][1]:
                        break
                else:
                    if self.__label[j] is False and exa.get_feature(
                            j) != neg_set[i].get_feature(j):
                        break
                j += 1
            if j >= self.__dimension.get_size():
                return False
        return True

    # update the positive and negative sets with a new sample (online strategy)
    def online_update(self, ins):

        # update positive set
        j = 0
        while j < self.__positive_num:
            if ins.get_fitness() < self.__pos_pop[j].get_fitness():
                break
            else:
                j += 1

        if j < self.__positive_num:
            temp = ins
            ins = self.__pos_pop[self.__positive_num - 1]
            k = self.__positive_num - 1
            while k > j:
                self.__pos_pop[k] = self.__pos_pop[k - 1]
                k -= 1
            self.__pos_pop[j] = temp

        # update negative set
        j = 0
        while j < self.__sample_size:
            if ins.get_fitness() < self.__pop[j].get_fitness():
                break
            else:
                j += 1
        if j < self.__sample_size:
            temp = ins
            ins = self.__pop[self.__sample_size - 1]
            k = self.__sample_size - 1
            while k > j:
                self.__pop[k] = self.__pop[k - 1]
                k -= 1
            self.__pop[j] = temp

        return

    # update optimal
    def update_optimal(self):
        if self.__pos_pop[0].get_fitness() < self.__optimal.get_fitness():
            self.__optimal = self.__pos_pop[0].copy_instance()
        return

    # generate an instance randomly based on a positive sample for mixed optimization
    def pos_random_mix_instance(self, exa, region, label):
        ins = Instance(self.__dimension)
        for i in range(self.__dimension.get_size()):
            if label[i] is False:
                ins.set_feature(i, exa.get_feature(i))
            else:
                # float random
                if self.__dimension.get_type(i) == 0:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_double(region[i][0],
                                                     region[i][1]))
                # integer random
                elif self.__dimension.get_type(i) == 1:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(region[i][0],
                                                      region[i][1]))
                # categorical random
                else:
                    ins.set_feature(
                        i,
                        self.__ro.get_uniform_integer(
                            self.__dimension.get_region(i)[0],
                            self.__dimension.get_region(i)[1]))
        return ins

    # generate the model (region and label) for the next sampling in mixed optimization
    # label[i] = False means dimension i is fixed to the value of the positive sample
    def shrink_model(self, exa, neg_set):
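        # remain_neg indexes the negative samples still covered by the model;
        # each step picks a random free dimension and a random remaining negative,
        # cuts the interval on float/integer dimensions or fixes the value on
        # categorical ones, then drops the negatives the cut excluded; the number
        # of shrink steps is returned.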

        dist_count = 0

        chosen_dim = []
        non_chosen_dim = [i for i in range(self.__dimension.get_size())]

        remain_neg = [i for i in range(self.__sample_size)]

        while len(remain_neg) != 0:

            dist_count += 1

            temp_dim = non_chosen_dim[self.__ro.get_uniform_integer(
                0,
                len(non_chosen_dim) - 1)]
            chosen_neg = self.__ro.get_uniform_integer(0, len(remain_neg) - 1)
            # float dimension shrink
            if self.__dimension.get_type(temp_dim) == 0:
                if exa.get_feature(temp_dim) < neg_set[
                        remain_neg[chosen_neg]].get_feature(temp_dim):
                    temp_v = self.__ro.get_uniform_double(
                        exa.get_feature(temp_dim),
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim))
                    if temp_v < self.__region[temp_dim][1]:
                        self.__region[temp_dim][1] = temp_v
                else:
                    temp_v = self.__ro.get_uniform_double(
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim),
                        exa.get_feature(temp_dim))
                    if temp_v > self.__region[temp_dim][0]:
                        self.__region[temp_dim][0] = temp_v
                r_i = 0
                while r_i < len(remain_neg):
                    if neg_set[remain_neg[r_i]].get_feature(temp_dim) < self.__region[temp_dim][0] or \
                            neg_set[remain_neg[r_i]].get_feature(temp_dim) > self.__region[temp_dim][1]:
                        remain_neg.remove(remain_neg[r_i])
                    else:
                        r_i += 1
            # integer dimension shrink
            elif self.__dimension.get_type(temp_dim) == 1:
                if exa.get_feature(temp_dim) < neg_set[
                        remain_neg[chosen_neg]].get_feature(temp_dim):
                    temp_v = self.__ro.get_uniform_integer(
                        exa.get_feature(temp_dim),
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim) -
                        1)
                    if temp_v < self.__region[temp_dim][1]:
                        self.__region[temp_dim][1] = temp_v
                else:
                    temp_v = self.__ro.get_uniform_integer(
                        neg_set[remain_neg[chosen_neg]].get_feature(temp_dim) -
                        1, exa.get_feature(temp_dim))
                    if temp_v > self.__region[temp_dim][0]:
                        self.__region[temp_dim][0] = temp_v
                if self.__region[temp_dim][0] == self.__region[temp_dim][1]:
                    chosen_dim.append(temp_dim)
                    non_chosen_dim.remove(temp_dim)
                    self.__label[temp_dim] = False
                r_i = 0
                while r_i < len(remain_neg):
                    if neg_set[remain_neg[r_i]].get_feature(temp_dim) < self.__region[temp_dim][0] or \
                                    neg_set[remain_neg[r_i]].get_feature(temp_dim) > self.__region[temp_dim][1]:
                        remain_neg.remove(remain_neg[r_i])
                    else:
                        r_i += 1
            # categorical
            else:
                chosen_dim.append(temp_dim)
                non_chosen_dim.remove(temp_dim)
                self.__label[temp_dim] = False
                r_i = 0
                while r_i < len(remain_neg):
                    if neg_set[remain_neg[r_i]].get_feature(
                            temp_dim) != exa.get_feature(temp_dim):
                        remain_neg.remove(remain_neg[r_i])
                    else:
                        r_i += 1

        while len(non_chosen_dim) > self.__uncertain_bit:
            temp_dim = non_chosen_dim[self.__ro.get_uniform_integer(
                0,
                len(non_chosen_dim) - 1)]
            chosen_dim.append(temp_dim)
            non_chosen_dim.remove(temp_dim)
            self.__label[temp_dim] = False

        return dist_count

    # sequential Racos for mixed optimization; dimension types may be float,
    # integer or categorical (a usage sketch follows at the end of this example)
    def mix_opt(self, obj_fct=None, ss=2, bud=20, pn=1, rp=0.95, ub=1):

        sample_count = 0
        all_dist_count = 0

        # initialize sample set
        self.clear()
        self.log_clear()
        self.set_parameters(ss=ss, bud=bud, pn=pn, rp=rp, ub=ub)
        self.reset_model()
        self.initialize(obj_fct)

        # ------------------------------------------------------
        # print 'after initialization------------'
        # self.show_pos_pop()
        # self.show_pop()
        # ------------------------------------------------------

        # optimization
        budget_c = self.__sample_size + self.__positive_num
        while budget_c < self.__budget:
            budget_c += 1
            if budget_c % 10 == 0:
                # print '======================================================'
                print('budget ', budget_c, ':', self.__optimal.get_fitness())
                # self.__optimal.show_instance()
            while True:
                self.reset_model()
                chosen_pos = self.__ro.get_uniform_integer(
                    0, self.__positive_num - 1)
                model_sample = self.__ro.get_uniform_double(0.0, 1.0)
                if model_sample <= self.__rand_probability:
                    dc = self.shrink_model(self.__pos_pop[chosen_pos],
                                           self.__pop)
                    all_dist_count += dc

                # -----------------------------------------------------------
                # self.show_region()
                # self.show_label()
                # -----------------------------------------------------------

                ins = self.pos_random_mix_instance(self.__pos_pop[chosen_pos],
                                                   self.__region, self.__label)

                sample_count += 1

                if (self.instance_in_list(ins, self.__pos_pop,
                                          self.__positive_num) is
                        False) and (self.instance_in_list(
                            ins, self.__pop, self.__sample_size) is False):
                    ins.set_fitness(obj_fct(ins.get_features()))

                    # logging
                    model_instance, neg_set, new_instance = self.generate_environment(
                        self.__pos_pop[chosen_pos], ins)
                    self.__model_ins.append(model_instance)
                    self.__negative_set.append(neg_set)
                    self.__new_ins.append(new_instance)

                    if ins.get_fitness() < self.__optimal.get_fitness():
                        self.__sample_label.append(1)
                    else:
                        self.__sample_label.append(0)
                    # ------------------------------------------------------
                    # print 'new sample:-------------------'
                    # ins.show_instance()
                    # print '------------------------------'
                    # ------------------------------------------------------

                    break
            self.online_update(ins)

            # ------------------------------------------------------
            # print 'after updating------------'
            # self.show_pos_pop()
            # self.show_pop()
            # ------------------------------------------------------

            self.update_optimal()
        # print 'average sample times of each sample:', float(sample_count) / self.__budget
        # print 'average shrink times of each sample:', float(all_dist_count) / sample_count
        return
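
A matching usage sketch for this snake_case variant, again assuming Instance and RandomOperator from the same project are importable; SimpleDimension below is a hypothetical stand-in providing the get_size/get_region/get_type accessors used above, with get_type returning 0 for float dimensions.

def sphere(x):
    # toy objective: smaller fitness is better
    return sum(v * v for v in x)


class SimpleDimension:  # hypothetical stand-in, only for this sketch
    def __init__(self, size, lower, upper):
        self.__size = size
        self.__regions = [[lower, upper] for _ in range(size)]

    def get_size(self):
        return self.__size

    def get_region(self, index):
        return self.__regions[index]

    def get_type(self, index):
        return 0  # 0: float, 1: integer, anything else: categorical


dim = SimpleDimension(10, -1.0, 1.0)
racos = RacosOptimization(dim)
racos.mix_opt(obj_fct=sphere, ss=5, bud=200, pn=1, rp=0.95, ub=1)
print(racos.get_optimal().get_fitness())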