Example #1
    def auto_set(self, budget):
        """
        Set train_size, positive_size, negative_size by following rules:
            budget < 3 --> error;
            budget: 3-50 --> train_size = 4, positive_size = 1;
            budget: 51-100 --> train_size = 6, positive_size = 1;
            budget: 101-1000 --> train_size = 12, positive_size = 2;
            budget > 1000 --> train_size = 22, positive_size = 2;

        :param budget: number of calls to the objective function
        :return: no return value
        """

        if budget < 3:
            ToolFunction.log('parameter.py: budget too small')
            sys.exit(1)
        elif budget <= 50:
            self.__train_size = 4
            self.__positive_size = 1
        elif budget <= 100:
            self.__train_size = 6
            self.__positive_size = 1
        elif budget <= 1000:
            self.__train_size = 12
            self.__positive_size = 2
        else:
            self.__train_size = 22
            self.__positive_size = 2
        self.__negative_size = self.__train_size - self.__positive_size
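For quick reference, the same thresholds as a standalone, table-driven sketch (a hypothetical helper, not part of the library):

def sizes_for_budget(budget):
    """Mirror auto_set's thresholds: return (train_size, positive_size)."""
    if budget < 3:
        raise ValueError('budget too small')
    for upper, train, pos in [(50, 4, 1), (100, 6, 1), (1000, 12, 2)]:
        if budget <= upper:
            return train, pos
    return 22, 2

assert sizes_for_budget(3) == (4, 1)
assert sizes_for_budget(1001) == (22, 2)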
Example #2
    def distinct_sample_classifier(self,
                                   classifier,
                                   check_distinct=True,
                                   data_num=0):
        """
        Sample a distinct solution (compared with solutions in
        self._positive_data and self._negative_data) from a learned classifier.

        :param classifier: a RacosClassification object
        :param check_distinct: whether to check that the sampled solution is distinct
        :param data_num: the number of samples required; used to detect when a
            finite sample space has been fully enumerated
        :return: sampled solution and distinct_flag (True if distinct)
        """

        x = classifier.rand_sample()
        ins = self._objective.construct_solution(x)
        times = 1
        distinct_flag = True
        if check_distinct is True:
            while self.is_distinct(self._positive_data, ins) is False or \
                    self.is_distinct(self._negative_data, ins) is False:
                x = classifier.rand_sample()
                ins = self._objective.construct_solution(x)
                times += 1
                if times % 10 == 0:
                    if times == 10:
                        space = classifier.get_sample_space()
                    limited, number = space.limited_space()
                    if limited is True:
                        if number <= data_num:
                            ToolFunction.log(
                                'racos_common.py: WARNING -- sample space has been fully enumerated. Stop early'
                            )
                            return None, None
                    if times > 100:
                        distinct_flag = False
                        break
        return ins, distinct_flag
Example #3
    def set_region(self, index, reg, ty):
        """
        Set the region and type of the dimension at the given index.

        :return: no return value
        """
        if index < 0 or index > self._size - 1:
            ToolFunction.log('dimension.py: index out of bound')
            return
        self._regions[index] = reg
        self._types[index] = ty
        return
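A hypothetical usage sketch for set_region, assuming the Dimension(size, regions, types) constructor used elsewhere in this codebase:

dim = Dimension(3, [[-1, 1], [-1, 1], [-1, 1]], [True, True, True])
dim.set_region(1, [0, 5], True)   # in bounds: region 1 becomes [0, 5]
dim.set_region(7, [0, 5], True)   # out of bounds: logs a warning and returns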
Example #4
    def distinct_sample_from_set(self,
                                 dim,
                                 set,
                                 check_distinct=True,
                                 data_num=0):
        """
        Sample a solution from dim that is distinct from the solutions in set.

        :param dim: a Dimension object
        :param set: a list containing other solutions
        :param check_distinct: whether to check that the sampled solution is distinct
        :param data_num: the number of samples required; used to detect when a
            finite sample space has been fully enumerated
        :return: sampled solution and distinct_flag (True if distinct)
        """
        objective = self._objective
        x = objective.construct_solution(dim.rand_sample())
        times = 1
        distinct_flag = True
        if check_distinct is True:
            while self.is_distinct(set, x) is False:
                x = objective.construct_solution(dim.rand_sample())
                times += 1
                if times % 10 == 0:
                    limited, number = dim.limited_space()
                    if limited is True:
                        if number <= data_num:
                            ToolFunction.log(
                                'racos_common.py: WARNING -- sample space has been fully enumerated. Stop early'
                            )
                            return None, None
                    if times > 100:
                        distinct_flag = False
                        break
        return x, distinct_flag
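Examples #2 and #4 share the same retry-with-early-stop control flow. Distilled into a self-contained sketch (hypothetical names, not library code):

import random

def sample_distinct(draw, seen, space_size=None, max_tries=100):
    # Keep drawing until the sample is unseen.
    # (None, None) mirrors the 'panic stop' when a finite space is exhausted;
    # (x, False) mirrors giving up and accepting a duplicate after max_tries.
    x, tries = draw(), 1
    while x in seen:
        if space_size is not None and len(seen) >= space_size:
            return None, None
        if tries >= max_tries:
            return x, False
        x, tries = draw(), tries + 1
    return x, True

x, distinct_flag = sample_distinct(lambda: random.randint(0, 9), {0, 1, 2}, space_size=10)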
Example #5
    def min(objective, parameter):
        """
        Minimization function.

        :param objective: an Objective object
        :param parameter: a Parameter object
        :return: the result of the optimization
        """
        objective.parameter_set(parameter)
        Opt.set_global(parameter)
        constraint = objective.get_constraint()
        algorithm = parameter.get_algorithm()
        if algorithm:
            algorithm = algorithm.lower()
        result = None
        if constraint is not None and ((algorithm is None) or (algorithm == "poss")):
            optimizer = ParetoOptimization()
        elif constraint is None and ((algorithm is None) or (algorithm in ("racos", "sracos", "ssracos"))):
            optimizer = RacosOptimization()
        else:
            ToolFunction.log(
                "opt.py: No proper algorithm found for %s" % algorithm)
            return result
        if objective.get_reducedim() is True:
            sre = SequentialRandomEmbedding(objective, parameter, optimizer)
            result = sre.opt()
        else:
            result = optimizer.opt(objective, parameter)
        return result
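A typical call path for Opt.min, sketched with a toy objective; this assumes the standard ZOOpt entry objects (Dimension, Objective, Parameter) that appear throughout these examples:

import numpy as np

def sphere(solution):
    x = np.array(solution.get_x())
    return float(np.sum(x * x))          # minimum 0 at the origin

dim = Dimension(10, [[-1, 1]] * 10, [True] * 10)   # 10 continuous dimensions
obj = Objective(sphere, dim)
sol = Opt.min(obj, Parameter(budget=1000))          # no constraint -> Racos family
print(sol.get_x(), sol.get_value())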
Example #6
    def print_sample_region(self):
        """
        Print sample region.

        :return: no return value
        """
        ToolFunction.log('------print sample region------')
        ToolFunction.log(self.__sample_region)
Example #7
    def print_neg(self):
        """
        Print negative population.

        :return: no return value
        """
        ToolFunction.log('------print neg------')
        for x in self.__negative_solution:
            x.print_solution()
Example #8
    def print_solution_set(sol_set):
        """
        Print the value of each solution in a solution set.

        :param sol_set: solution set
        :return: no return value
        """
        for sol in sol_set:
            ToolFunction.log('value: %f' % (sol.get_value()))
        return
Example #9
    def print_dim(self):
        """
        Print the dimension information.

        :return: no return value
        """
        ToolFunction.log('dim size: %d' % self._size)
        ToolFunction.log('dim regions are:')
        ToolFunction.log(self._regions)
        ToolFunction.log('dim types are:')
        ToolFunction.log(self._types)
Example #10
    def print_pos(self):
        """
        Print positive population.

        :return: no return value
        """

        ToolFunction.log('------print pos------')
        for x in self.__positive_solution:
            x.print_solution()
Example #11
    def judge_match(size, regs, tys):
        """
        Check whether the lengths of regs and tys both equal size.

        :param size: the expected dimension size
        :param regs: a list of regions
        :param tys: a list of types
        :return: True or False
        """
        if size != len(regs) or size != len(tys):
            ToolFunction.log('dimension.py: dimensions do not match')
            return False
        else:
            return True
Example #12
    def show_best_solution(self, intermediate_print=False, times=0, freq=100):
        """
        Show the intermediate best solution every 'freq' evaluations.

        :param intermediate_print: whether to print intermediate results
        :param times: the current number of evaluations
        :param freq: print frequency, in evaluations
        :return: no return value
        """
        if intermediate_print is True and times % freq == 0:
            ToolFunction.log(("budget %d, fx result: " % times) +
                             str(self._best_solution.get_value()))
            ToolFunction.log("x: " + str(self._best_solution.get_x()))
Example #13
    def result_analysis(results, top):
        """
        Get mean value and standard deviation of best 'top' results.

        :param results: a list of results
        :param top: the number of best results used to calculate mean value and standard deviation
        :return: mean value and standard deviation of best 'top' results
        """
        limit = top if top < len(results) else len(results)
        results.sort()
        top_k = results[0:limit]
        mean_r = np.mean(top_k, axis=0, dtype=np.float64)
        std_r = np.std(top_k, axis=0, dtype=np.float64)
        if limit <= 1:
            ToolFunction.log('Best %d result: %s +- %s' % (limit, mean_r, std_r))
        else:
            ToolFunction.log('Best %d results: %s +- %s' % (limit, mean_r, std_r))
        return mean_r, std_r
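A worked call to make the arithmetic concrete: with results = [3.2, 0.9, 2.5, 1.1] and top = 2, sorting (in place) gives [0.9, 1.1, 2.5, 3.2], so the best two are [0.9, 1.1]; the mean is 1.0 and the population standard deviation (np.std's default) is 0.1:

mean_r, std_r = ExpOpt.result_analysis([3.2, 0.9, 2.5, 1.1], 2)
# logs approximately: Best 2 results: 1.0 +- 0.1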
Example #14
    def min(objective, parameter, repeat=1, best_n=None, plot=False, plot_file=None, seed=None):
        """
        Minimization function.

        :param objective: an Objective object
        :param parameter: a Parameter object
        :param repeat: integer, repeat times of the optimization
        :param best_n:
            integer, ExpOpt.min will print the mean and standard deviation of the
            best_n optimal results among the returned solution list.
        :param plot: whether to plot the regret curve during the optimization
        :param plot_file: the file name to output the figure
        :param seed: random seed of the optimization
        :return: a list of the best solution found in each repeat
        """
        objective.parameter_set(parameter)
        ret = []
        if best_n is None:
            best_n = repeat
        if seed is not None:
            gl.set_seed(seed)  # set random seed
        result = []
        for i in range(repeat):
            # perform the optimization
            solution = Opt.min(objective, parameter)
            ret.append(solution)
            ToolFunction.log('solved solution is:')
            solution.print_solution()
            # store the optimization result
            result.append(solution.get_value())

            # for plotting the optimization history
            history = np.array(objective.get_history_bestsofar())  # best-so-far value after each evaluation
            if plot is True:
                plt.plot(history)
            objective.clean_history()
        if plot is True:
            if plot_file is not None:
                plt.savefig(plot_file)
            else:
                plt.show()
        ExpOpt.result_analysis(result, best_n)
        return ret
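A sketch of a repeated experiment, reusing the sphere function from the Opt.min sketch above (the plot_file name is illustrative):

obj = Objective(sphere, Dimension(10, [[-1, 1]] * 10, [True] * 10))
solution_list = ExpOpt.min(obj, Parameter(budget=1000),
                           repeat=5, best_n=3,
                           plot=True, plot_file='history.png', seed=12345)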
Example #15
    def init_attribute(self):
        """
        Init self._data, self._positive_data, self._negative_data by sampling.

        :return: no return value
        """
        self._parameter.set_negative_size(self._parameter.get_train_size() -
                                          self._parameter.get_positive_size())
        # check if the initial solutions have been set
        data_temp = self._parameter.get_init_samples()
        i = 0
        iteration_num = self._parameter.get_train_size()
        if data_temp is not None and self._best_solution is None:
            size = len(data_temp)
            if iteration_num < size:
                size = iteration_num
            for j in range(size):
                if isinstance(data_temp[j], Solution) is False:
                    x = self._objective.construct_solution(data_temp[j])
                else:
                    x = data_temp[j]
                if math.isnan(x.get_value()):
                    self._objective.eval(x)
                self._data.append(x)
                ToolFunction.log("init solution %s, value: %s" %
                                 (i, x.get_value()))
                i += 1
        # otherwise generate random solutions

        while i < iteration_num:
            # distinct_flag: True means the sample is distinct (can be used);
            # False means the sample is NOT distinct and should be sampled again.
            x, distinct_flag = self.distinct_sample_from_set(
                self._objective.get_dim(), self._data, data_num=iteration_num)
            # panic stop
            if x is None:
                break
            if distinct_flag:
                self._objective.eval(x)
                self._data.append(x)
                i += 1
        self.selection()
        return
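If Parameter accepts the initial samples that get_init_samples() returns above as a constructor argument (assumed here as init_samples; only the getter appears in this snippet), a warm start would look like this, reusing obj from the sketches above:

warm = [[0.1] * 10, [0.0] * 10]                  # known-good starting points
par = Parameter(budget=1000, init_samples=warm)  # assumed constructor kwarg
sol = Opt.min(obj, par)  # init_attribute() consumes these before random sampling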
Example #16
    def opt(self):
        """
        Sequential random embedding optimization.

        :return: the best solution of the optimization
        """

        dim = self.__objective.get_dim()
        res = []
        iteration = self.__parameter.get_num_sre()
        new_obj = copy.deepcopy(self.__objective)
        new_par = copy.deepcopy(self.__parameter)
        new_par.set_budget(
            math.floor(self.__parameter.get_budget() / iteration))
        new_obj.set_last_x(Solution(x=[0]))
        for i in range(iteration):
            ToolFunction.log('sequential random embedding %d' % i)
            new_obj.set_A(
                np.sqrt(self.__parameter.get_variance_A()) * np.random.randn(
                    dim.get_size(),
                    self.__parameter.get_low_dimension().get_size()))
            new_dim = Dimension.merge_dim(
                self.__parameter.get_withdraw_alpha(),
                self.__parameter.get_low_dimension())
            new_obj.set_dim(new_dim)
            result = self.__optimizer.opt(new_obj, new_par)
            x = result.get_x()
            x_origin = x[0] * np.array(new_obj.get_last_x().get_x()) + np.dot(
                new_obj.get_A(), np.array(x[1:]))
            sol = Solution(x=x_origin, value=result.get_value())
            new_obj.set_last_x(sol)
            res.append(sol)
        best_sol = res[0]
        for i in range(len(res)):
            if res[i].get_value() < best_sol.get_value():
                best_sol = res[i]
        self.__objective.get_history().extend(new_obj.get_history())
        return best_sol
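The back-projection above maps a low-dimensional point into the original space: the first coordinate scales the previous round's solution, and the rest pass through the random matrix A. A numeric sketch of just that step (sizes and variance_A are illustrative):

import numpy as np

high_dim, low_dim = 1000, 10
A = np.sqrt(0.1) * np.random.randn(high_dim, low_dim)  # variance_A = 0.1
last_x = np.zeros(high_dim)                            # first round starts at 0
y = np.random.randn(low_dim + 1)                       # [withdraw alpha, low-dim x]
x_origin = y[0] * last_x + np.dot(A, y[1:])            # shape (high_dim,)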
Example #17
    def print_solution(self):
        """Print the solution's x and value."""
        ToolFunction.log('x: ' + repr(self.__x))
        ToolFunction.log('value: ' + repr(self.__value))
Example #18
    def print_data(self):
        """Print the current data population (self._data)."""
        ToolFunction.log('------print data------')
        ToolFunction.log('the size of data is: %d' % (len(self._data)))
        for x in self._data:
            x.print_solution()
Example #19
    def print_negative_data(self):
        """Print the negative data population (self._negative_data)."""
        ToolFunction.log('------print negative_data------')
        ToolFunction.log('the size of negative_data is: %d' %
                         (len(self._negative_data)))
        for x in self._negative_data:
            x.print_solution()
Example #20
    def opt(self, objective, parameter, strategy='WR', ub=1):
        """
        SRacos optimization.

        :param objective: an Objective object
        :param parameter: a Parameter object
        :param strategy: replace strategy
        :param ub: uncertain bits, which is a parameter of SRacos
        :return: Optimization result
        """
        self.clear()
        self.set_objective(objective)
        self.set_parameters(parameter)
        self.init_attribute()
        i = 0
        iteration_num = self._parameter.get_budget() - self._parameter.get_train_size()
        time_log1 = time.time()
        max_distinct_repeat_times = 100
        current_not_distinct_times = 0
        last_best = None
        while i < iteration_num:
            if gl.rand.random() < self._parameter.get_probability():
                classifier = RacosClassification(self._objective.get_dim(),
                                                 self._positive_data,
                                                 self._negative_data, ub)
                classifier.mixed_classification()
                solution, distinct_flag = self.distinct_sample_classifier(
                    classifier, True, self._parameter.get_train_size())
            else:
                solution, distinct_flag = self.distinct_sample(
                    self._objective.get_dim())
            # panic stop
            if solution is None:
                ToolFunction.log(" [break loop] because solution is None")
                return self._best_solution
            if distinct_flag is False:
                current_not_distinct_times += 1
                if current_not_distinct_times >= max_distinct_repeat_times:
                    ToolFunction.log(
                        "[break loop] because distinct_flag is false too much times"
                    )
                    return self._best_solution
                else:
                    continue
            # evaluate the solution
            objective.eval(solution)
            # show best solution
            times = i + self._parameter.get_train_size() + 1
            self.show_best_solution(parameter.get_intermediate_result(), times,
                                    parameter.get_intermediate_freq())
            bad_ele = self.replace(self._positive_data, solution, 'pos')
            self.replace(self._negative_data, bad_ele, 'neg', strategy)
            self._best_solution = self._positive_data[0]
            last_best = self._best_solution.get_value()
            if i == 4:
                time_log2 = time.time()
                expected_time = (self._parameter.get_budget() - self._parameter.get_train_size()) * \
                                (time_log2 - time_log1) / 5
                if self._parameter.get_time_budget() is not None:
                    expected_time = min(expected_time,
                                        self._parameter.get_time_budget())
                if expected_time > 5:
                    m, s = divmod(expected_time, 60)
                    h, m = divmod(m, 60)
                    ToolFunction.log(
                        'expected remaining running time: %02d:%02d:%02d' %
                        (h, m, s))
            # time budget check
            if self._parameter.get_time_budget() is not None:
                if (time.time() -
                        time_log1) >= self._parameter.get_time_budget():
                    ToolFunction.log('time_budget runs out')
                    return self._best_solution
            # terminal_value check
            if self._parameter.get_terminal_value() is not None:
                if self._best_solution.get_value() <= self._parameter.get_terminal_value():
                    ToolFunction.log('terminal function value reached')
                    return self._best_solution
            i += 1
        return self._best_solution
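The replace method called above is not shown; here is a minimal sketch of what a 'WR' (worst-replace) strategy could look like, not ZOOpt's actual implementation:

def replace_worst(data, solution):
    # Insert the new solution, keep the list sorted by value (smaller is
    # better), and return the discarded worst element.
    data.append(solution)
    data.sort(key=lambda s: s.get_value())
    return data.pop()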
Example #21
    def opt(self, objective, parameter):
        """
        Pareto optimization.

        :param objective: an Objective object
        :param parameter: a Parameter object
        :return: the best solution of the optimization
        """
        isolationFunc = parameter.get_isolationFunc()
        n = objective.get_dim().get_size()

        # initiate the population
        sol = objective.construct_solution(np.zeros(n))
        objective.eval_constraint(sol)

        population = [sol]
        fitness = [sol.get_value()]
        pop_size = 1
        # iteration count
        t = 0
        T = parameter.get_budget()
        while t < T:
            if t == 0:
                time_log1 = time.time()
            # choose an individual from the population randomly
            s = population[gl.rand.randint(1, pop_size) - 1]
            # every bit will be flipped with probability 1/n
            offspring_x = self.mutation(s.get_x(), n)
            offspring = objective.construct_solution(offspring_x)
            objective.eval_constraint(offspring)
            offspring_fit = offspring.get_value()
            # now we need to update the population
            hasBetter = False

            for i in range(0, pop_size):
                if isolationFunc(offspring_x) != isolationFunc(
                        population[i].get_x()):
                    continue
                else:
                    if (fitness[i][0] < offspring_fit[0] and fitness[i][1] >= offspring_fit[1]) or \
                            (fitness[i][0] <= offspring_fit[0] and fitness[i][1] > offspring_fit[1]):
                        hasBetter = True
                        break
            # there is no better individual than offspring
            if not hasBetter:
                Q = []
                Qfit = []
                for j in range(0, pop_size):
                    if offspring_fit[0] <= fitness[j][0] and offspring_fit[
                            1] >= fitness[j][1]:
                        continue
                    else:
                        Q.append(population[j])
                        Qfit.append(fitness[j])
                Q.append(offspring)
                Qfit.append(offspring_fit)
                # update fitness
                fitness = Qfit
                # update population
                population = Q
            t += 1
            pop_size = np.shape(fitness)[0]

            # display expected running time
            if t == 5:
                time_log2 = time.time()
                expected_time = T * (time_log2 - time_log1) / 5
                if expected_time > 5:
                    m, s = divmod(expected_time, 60)
                    h, m = divmod(m, 60)
                    ToolFunction.log(
                        'expected remaining running time: %02d:%02d:%02d' %
                        (h, m, s))
        result_index = -1
        best_value = float('inf')
        for p in range(0, pop_size):
            fitness = population[p].get_value()
            if fitness[1] >= 0 and fitness[0] < best_value:
                best_value = fitness[0]
                result_index = p
        return population[result_index]
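The mutation called in Examples #21 and #22 flips each bit with probability 1/n (per the inline comment); a plausible standalone sketch, which may differ from ZOOpt's actual self.mutation:

import random

def mutation(x, n):
    child = list(x)
    for i in range(n):
        if random.random() < 1.0 / n:
            child[i] = 1 - child[i]   # flip a binary bit
    return child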
Example #22
    def opt(self, objective, parameter):
        """
        Pareto optimization under noise.

        :param objective: an Objective object
        :param parameter: a Parameter object
        :return: the best solution of the optimization
        """
        isolationFunc = parameter.get_isolationFunc()
        theta = parameter.get_ponss_theta()
        b = parameter.get_ponss_b()
        n = objective.get_dim().get_size()

        # initiate the population
        sol = objective.construct_solution(np.zeros(n))
        objective.eval_constraint(sol)

        population = [sol]
        pop_size = 1
        # iteration count
        t = 0
        T = parameter.get_budget()
        while t < T:
            if t == 0:
                time_log1 = time.time()
            # choose an individual from the population randomly
            s = population[gl.rand.randint(1, pop_size) - 1]
            # every bit will be flipped with probability 1/n
            offspring_x = self.mutation(s.get_x(), n)
            offspring = objective.construct_solution(offspring_x)
            objective.eval_constraint(offspring)
            offspring_fit = offspring.get_value()
            # now we need to update the population
            has_better = False

            for i in range(0, pop_size):
                if isolationFunc(offspring_x) != isolationFunc(population[i].get_x()):
                    continue
                else:
                    if self.theta_dominate(theta, population[i], offspring):
                        has_better = True
                        break
            # there is no better individual than offspring
            if not has_better:
                P = []
                Q = []
                for j in range(0, pop_size):
                    if self.theta_weak_dominate(theta, offspring, population[j]):
                        continue
                    else:
                        P.append(population[j])
                P.append(offspring)
                population = P
                for sol in population:
                    if sol.get_value()[1] == offspring.get_value()[1]:
                        Q.append(sol)
                if len(Q) == b + 1:
                    for sol in Q:
                        population.remove(sol)
                    j = 0
                    while j < b:
                        sols = gl.rand.sample(Q, 2)
                        Q.remove(sols[0])
                        Q.remove(sols[1])
                        objective.eval_constraint(sols[0])
                        objective.eval_constraint(sols[1])
                        if sols[0].get_value()[0] < sols[1].get_value()[0]:
                            population.append(sols[0])
                            Q.append(sols[1])
                        else:
                            population.append(sols[1])
                            Q.append(sols[0])
                        j += 1
                        t += 2
            t += 1
            pop_size = len(population)

            # display expected running time
            if t == 5:
                time_log2 = time.time()
                expected_time = T * (time_log2 - time_log1) / 5
                if expected_time > 5:
                    m, s = divmod(expected_time, 60)
                    h, m = divmod(m, 60)
                    ToolFunction.log('expected remaining running time: %02d:%02d:%02d' % (h, m, s))

        result_index = -1
        best_value = float('inf')
        for p in range(pop_size):
            fitness = population[p].get_value()
            if fitness[1] >= 0 and fitness[0] < best_value:
                best_value = fitness[0]
                result_index = p
        return population[result_index]
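theta_dominate and theta_weak_dominate are not shown above. One plausible formulation, mirroring Example #21's plain dominance test but demanding a margin of theta on the first (minimized) objective; this is an assumption based on the PONSS idea, not ZOOpt's exact code:

    @staticmethod
    def theta_dominate(theta, sol1, sol2):
        # sol1 theta-dominates sol2: better by more than theta on the first
        # objective and no worse on the second, or at-least-theta better and
        # strictly better on the second.
        f1, f2 = sol1.get_value(), sol2.get_value()
        return (f1[0] + theta < f2[0] and f1[1] >= f2[1]) or \
               (f1[0] + theta <= f2[0] and f1[1] > f2[1])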