Beispiel #1
0
 def __init__(self, obj1_id, obj1_instance, obj2_id, obj2_instance, dimension):
     """Set up a bi-objective problem from two single-objective benchmarks.

     Instantiates both benchmark functions via ``bm.instantiate``, performs
     one dummy evaluation of each (required before the benchmark exposes
     its optimum ``xopt``), and caches the objective value of each
     single-objective optimum under the *other* objective.
     """
     # Instantiate both objectives; `bm.instantiate` also returns the
     # optimal function value of each problem.
     self.obj1, self.opt1_obj1 = bm.instantiate(obj1_id, iinstance=obj1_instance)
     self.obj2, self.opt2_obj2 = bm.instantiate(obj2_id, iinstance=obj2_instance)
     # One evaluation is needed before `xopt` becomes available; keep the
     # resulting values around as the current objective vector.
     origin = np.zeros((1, dimension))
     self.fcurrent = np.zeros(2)  # current objective vector
     self.fcurrent[0] = self.obj1.evaluate(origin)
     self.fcurrent[1] = self.obj2.evaluate(origin)
     # Location of each single-objective optimum ...
     self.opt1_x = self.obj1.xopt
     self.opt2_x = self.obj2.xopt
     # ... and its value under the other objective.
     self.opt1_obj2 = self.obj2.evaluate(self.opt1_x)
     self.opt2_obj1 = self.obj1.evaluate(self.opt2_x)
Beispiel #2
0
    def __init__(self, dim, fid, iid):
        """Wrap BBOB function `fid`, instance `iid`, in dimension `dim`.

        Stores the function object as ``self.f``, its optimal f-value as
        ``self.fopt`` and the optimizer location as ``self.optimum``.
        """
        import bbobbenchmarks

        self.dim = dim
        self.f, self.fopt = bbobbenchmarks.instantiate(fid, iinstance=iid)

        # `xopt` only exists after at least one evaluation, hence this
        # throwaway call at the origin.
        self.f(np.zeros(dim))
        self.optimum = self.f.xopt
Beispiel #3
0
    def finstances(self):
        """
        An iterator that generates all function instances
        to be evaluated.

        For every dimension, yields one ``FInstance`` per
        (function id, instance id) pair; after each yielded instance has
        been consumed, the number of evaluations spent on it is recorded
        so the worst-case budget overshoot per dimension can be reported.
        """
        for dim in self.dimensions:
            maxfunevals = self.maxfev * dim
            # evaluation counts, one slot per (function, instance) pair
            fevs = np.zeros(len(self.function_ids) * len(self.instances))
            fevs_i = 0
            for fun_id in self.function_ids:
                for iinstance in self.instances:
                    self.f.setfun(*bbobbenchmarks.instantiate(fun_id, iinstance=iinstance))
                    yield FInstance(self.f, dim, fun_id, iinstance, maxfunevals)

                    fevs[fevs_i] = self.f.evaluations
                    fevs_i += 1

                # print() call form works on Python 2 and 3 alike; the
                # original `print "..."` statement was Python-2-only.
                print("  % -12s      date and time: %s" % (self.shortname, time.asctime()))

            # report the (function, instance) pair that used the most evaluations
            overshoot_idx = np.argmax(fevs)
            overshoot = fevs[overshoot_idx]
            overshoot_f = self.function_ids[int(overshoot_idx / len(self.instances))]
            overshoot_i = self.instances[overshoot_idx % len(self.instances)]
            print(
                "---- % -12s dimension %d-D done ----  (max FEV: %d/%d in f%d:%d)"
                % (self.shortname, dim, overshoot, maxfunevals, overshoot_f, overshoot_i)
            )
Beispiel #4
0
def run_optimizer(optimizer, dim, fID, instance, logfile, lb, ub, max_FEs,
                  data_path, bbob_opt):
    """Parallel BBOB/COCO experiment wrapper.

    Runs one `optimizer` on BBOB function `fID` (instance `instance`) in
    dimension `dim`, logging COCO data under `data_path` and appending a
    one-line summary of the run to `logfile`.

    Parameters
    ----------
    optimizer : callable
        Factory ``optimizer(dim, evalfun, ftarget, max_FEs, lb, ub, logfile)``
        returning an object with a ``run()`` method.
    dim : int
        Search-space dimensionality.
    fID : int
        BBOB function id.
    instance : int
        BBOB instance id; also used as the per-instance log sub-directory.
    logfile : str
        Path of the text file the run summary is appended to.
    lb, ub
        Lower/upper bounds, passed through to the optimizer.
    max_FEs : str
        Python expression for the evaluation budget (eval'd below).
    data_path : str
        Root directory for the COCO data logger.
    bbob_opt : dict
        Extra keyword options for ``fgeneric.LoggingFunction``.
    """
    # Set different seed for different processes
    start = time()
    seed = np.mod(int(start) + os.getpid(), 1000)
    np.random.seed(seed)

    # per-instance log directory
    data_path = os.path.join(data_path, str(instance))
    # NOTE(review): `max_FEs` is eval'd as a Python expression (e.g. "1e4*dim"
    # by convention elsewhere in this codebase) -- only pass trusted strings;
    # eval on untrusted input is a security hazard.
    max_FEs = eval(max_FEs)

    f = fgeneric.LoggingFunction(data_path, **bbob_opt)
    f.setfun(*bn.instantiate(fID, iinstance=instance))

    opt = optimizer(dim, f.evalfun, f.ftarget, max_FEs, lb, ub, logfile)
    opt.run()

    f.finalizerun()
    # append a one-line human-readable summary of this run
    with open(logfile, 'a') as fout:
        fout.write(
            "{} on f{} in {}D, instance {}: FEs={}, fbest-ftarget={:.4e}, "
            "elapsed time [m]: {:.3f}\n".format(optimizer, fID, dim, instance,
                                                f.evaluations,
                                                f.fbest - f.ftarget,
                                                (time() - start) / 60.))
Beispiel #5
0
def main(argv):
    """CLI entry point: evaluate a BBOB function at a given point.

    ``argv[0]`` is the function id, ``argv[1]`` the instance id and the
    remaining arguments the coordinates; prints the result relative to
    the known optimum.
    """
    fun_id = int(argv[0])
    inst_id = int(argv[1])
    point = np.array(argv[2:], dtype=float)

    # bn.instantiate returns (callable, optimal_f_value)
    function, opt_val = bn.instantiate(fun_id, inst_id)

    delta_opt = function(point) - opt_val
    print("result=%f" % delta_opt)
def perform_bbob_benchmarks(iinstances, trials):
    """Run the PSO heuristic over every (instance, trial) combination.

    Returns the accumulated error ``sum(f.fbest - f.ftarget)`` over all
    runs.  Relies on module-level configuration: ``datapath``, ``opts``,
    ``objective_function_id``, ``dimension``, ``maxfunevals``,
    ``box_restr`` and the start time ``t0``.
    """
    # logging wrapper around the benchmark function
    f = fgeneric.LoggingFunction(datapath, **opts)

    result = 0.0  # cumulative error over all runs

    for iinstance in iinstances:
        for trial in trials:
            # bind this benchmark instance to the logger
            f.setfun(
                *bn.instantiate(objective_function_id, iinstance=iinstance))

            # run the heuristic (a random-search baseline is kept around,
            # commented out, as an alternative)
            # _, _ = random_search(f.evalfun, dimension, maxfunevals, box_restr, f.ftarget)
            _, _ = pso(f.evalfun, dimension, maxfunevals, box_restr,
                       f.ftarget)

            # per-run progress report
            print(
                '  f%d in %d-D, instance %d: FEs=%d, '
                'fbest-ftarget=%.4e, elapsed time [h]: %.2f\n' %
                (objective_function_id, dimension, iinstance, f.evaluations,
                 f.fbest - f.ftarget, (time.time() - t0) / 60. / 60.))

            # the benchmark tracks the best fitness found (f.fbest) and the
            # target value (f.ftarget) itself, so the per-run error is
            # simply their difference -- no need to store solutions here
            result += f.fbest - f.ftarget

            # close the logger for the current run
            f.finalizerun()

    return result
Beispiel #7
0
 # NOTE(review): fragment of a BBOB experiment driver; `e` (the COCO logger),
 # `bn`, `maxrestarts` and `main` are defined outside this excerpt.
 # Iterate over all desired test dimensions
 for dim in (2, 3, 5, 10, 20, 40):
     # Set the maximum number function evaluation granted to the algorithm
     # This is usually function of the dimensionality of the problem
     maxfuncevals = 100 * dim**2
     minfuncevals = dim + 2
     
     # Iterate over a set of benchmarks (noise free benchmarks here)
     for f_name in bn.nfreeIDs:
         
         # Iterate over all the instance of a single problem
         # Rotation, translation, etc.
         # (BBOB convention: instances 1-5 and 21-30)
         for instance in chain(range(1, 6), range(21, 31)):
             
             # Set the function to be used (problem) in the logger
             e.setfun(*bn.instantiate(f_name, iinstance=instance))
             
             # Independent restarts until maxfunevals or ftarget is reached
             for restarts in range(0, maxrestarts + 1):
                 if restarts > 0:
                     # Signal the experiment that the algorithm restarted
                     e.restart('independent restart')  # additional info
                 
                 # Run the algorithm with the remaining number of evaluations
                 revals = int(math.ceil(maxfuncevals - e.evaluations))
                 main(e.evalfun, dim, revals, e.ftarget)
                 
                 # Stop if ftarget is reached
                 # (or if the remaining budget is too small for another restart)
                 if e.fbest < e.ftarget or e.evaluations + minfuncevals > maxfuncevals:
                     break
             
Beispiel #8
0
    # NOTE(review): Python-3 variant of the BBOB experiment loop above;
    # `e` (the COCO logger), `bn`, `maxrestarts` and `main` are defined
    # outside this excerpt.
    # Iterate over all desired test dimensions
    for dim in (2, 3, 5, 10, 20, 40):
        # Set the maximum number function evaluation granted to the algorithm
        # This is usually function of the dimensionality of the problem
        maxfuncevals = 100 * dim**2
        minfuncevals = dim + 2

        # Iterate over a set of benchmarks (noise free benchmarks here)
        for f_name in bn.nfreeIDs:

            # Iterate over all the instance of a single problem
            # Rotation, translation, etc.
            # (BBOB convention: instances 1-5 and 21-30)
            for instance in chain(list(range(1, 6)), list(range(21, 31))):

                # Set the function to be used (problem) in the logger
                e.setfun(*bn.instantiate(f_name, iinstance=instance))

                # Independent restarts until maxfunevals or ftarget is reached
                for restarts in range(0, maxrestarts + 1):
                    if restarts > 0:
                        # Signal the experiment that the algorithm restarted
                        e.restart('independent restart')  # additional info

                    # Run the algorithm with the remaining number of evaluations
                    revals = int(math.ceil(maxfuncevals - e.evaluations))
                    main(e.evalfun, dim, revals, e.ftarget)

                    # Stop if ftarget is reached
                    # (or if the remaining budget is too small for another restart)
                    if e.fbest < e.ftarget or e.evaluations + minfuncevals > maxfuncevals:
                        break
            fbest = fvalues[idx[0]]
            xbest = xpop[idx[0]]
        if fbest < ftarget:  # task achieved
            break

    return xbest


t0 = time.time()
np.random.seed(int(t0))

f = fgeneric.LoggingFunction(datapath, **opts)
for dim in dimensions:  # small dimensions first, for CPU reasons
    for fun_id in function_ids:
        for iinstance in instances:
            f.setfun(*bbobbenchmarks.instantiate(fun_id, iinstance=iinstance))

            # independent restarts until maxfunevals or ftarget is reached
            for restarts in xrange(maxrestarts + 1):
                if restarts > 0:
                    f.restart('independent restart')  # additional info
                run_optimizer(f.evalfun, dim,
                              eval(maxfunevals) - f.evaluations, f.ftarget)
                if (f.fbest < f.ftarget or
                        f.evaluations + eval(minfunevals) > eval(maxfunevals)):
                    break

            f.finalizerun()

            print(
                '  f%d in %d-D, instance %d: FEs=%d with %d restarts, '
    f.close()
    # Create a COCO experiment that will log the results under the
    # ./output directory
    e = fgeneric.LoggingFunction('output')

    observations = list()
    means = list()
    for i in range(params['qntYears']):
        observation = model.loadModelDB(region + 'jmaData', year + i)
        observation.bins = observation.bins.tolist()
        observations.append(observation)
        means.append(observation.bins)
    # del observation
    mean = np.mean(means, axis=0)
    param = (region, year, params['qntYears'])
    func, opt = bn.instantiate(2, iinstance=1, param=param)
    observation = model.loadModelDB(
        region + 'jmaData', year + params['qntYears'] + 1)
    ftarget = calcLogLikelihood(observation, observation)
    del observation
    e.setfun(func, opt=ftarget)

    gaModel(e.evalfun,
            NGEN=params['NGEN'],
            CXPB=params['CXPB'],
            MUTPB=params['MUTPB'],
            modelOmega=observations,
            year=year +
            params['qntYears'],
            region=region,
            mean=mean,
            xbest = alg.best_solution[0]

        if fbest < ftarget:  # task achieved 
            break

        # compute the next population
        alg.run(1)

    return xbest

# NOTE(review): CPU-timing experiment fragment (Python 2: bare `print`
# statements and string budgets eval'd below); `fgeneric`, `bn`,
# `run_optimizer` and `MAX_FUN_EVALS` are defined outside this excerpt.
timings = []  # average seconds per function evaluation, per dimension
runs = []     # number of optimizer runs completed per dimension
dims = []     # dimensions actually timed
for dim in (2, 3, 5, 10, 20, 40, 80, 160):  # 320, 640, 1280, 2560, 5120, 10240, 20480):
    nbrun = 0
    # f8 (Rosenbrock), instance 1, is used as the timing workload
    f = fgeneric.LoggingFunction('tmp').setfun(*bn.instantiate(8, 1))
    t0 = time.time()
    while time.time() - t0 < 30: # at least 30 seconds
        run_optimizer(f.evalfun, dim, eval(MAX_FUN_EVALS), f.ftarget)  # adjust maxfunevals
        nbrun = nbrun + 1
    # average cost per evaluation over all runs in this dimension
    timings.append((time.time() - t0) / f.evaluations)
    dims.append(dim)    # not really needed
    runs.append(nbrun)  # not really needed
    f.finalizerun()
    # progress table, re-printed after every dimension
    print '\nDimensions:',
    for i in dims:
        print ' %11d ' % i,
    print '\n      runs:',
    for i in runs:
        print ' %11d ' % i,
    print '\n times [s]:',
Beispiel #12
0
# Plot-region limits of the X/Y rectangle to sample (slightly beyond the
# usual BBOB domain [-5, 5]).
xLimitLower = -5.05
xLimitUpper = 5.05
yLimitLower = -5.05
yLimitUpper = 5.05

#Samplepoints per dimension (remember the total number of points is samplepoints²)
samplepoints = 101

#Range below/above the optimal function value - keep in mind this is minimization!
zLimitBelow = 10 # "empty" space below opt f-value
zLimitAbove = 100 # added range which is shown of the function above the opt f-value
#If you don't care and want automatically determined values for the given X/Y-rectangle
autoZ = True

#########SCRIPT#########
# NOTE(review): `ProblemID` is defined outside this excerpt.
problem, optimalFunValue = bbobbenchmarks.instantiate(ProblemID,1)
#one eval is needed so xopt exists
problem._evalfull(np.array([0,0]))
print('Problem: ' + str(ProblemID))
print('Optimal Solution Vector: ' + str(problem.xopt))
print('Optimal Function Value: ' + str(optimalFunValue))

@np.vectorize
def func(x, y):
    # Evaluate the 2-D problem at (x, y), shifted by the optimum location.
    # NOTE(review): `xopt`/`yopt` are not defined in this excerpt -- they are
    # presumably derived from `problem.xopt` further down; verify before reuse.
    coord = np.array([x-xopt,y-yopt])
    _, funValue = problem._evalfull(coord)
    return funValue
    #This return is much better for some problems
    #return np.log10(funValue - optimalFunValue)

#Generating the global optimum somewhere inside [-4,4]
Beispiel #13
0
def generate_plots(f_id, dim, inst_id, f1_id, f2_id, f1_instance, f2_instance,
                   outputfolder="./", inputfolder=None, tofile=True, downsample=False):
    ##############################################################
    #                                                            #
    # Objective Space of points on cut (log-scale).              #
    #                                                            #
    ##############################################################
    
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    
    myc = ['g', 'b', 'r', 'y'] # colors for the different line directions
    myls = [':', '--', '-'] # line styles
    mylw = dict(lw=2, alpha=0.6) # line width # ALSO: mylw = {'lw':2, 'alpha':0.9}
    
    
    # define lines as a + t*b
    tlim = 10 # 
    ngrid = 10001
    t = np.linspace(-tlim, tlim, num=ngrid, endpoint=True)
    
    # Query the optimum from the benchmark to get a working objective function:
    # -------------------------------------
    f1, f1opt = bm.instantiate(f1_id, iinstance=f1_instance)
    f2, f2opt = bm.instantiate(f2_id, iinstance=f2_instance)
    
    fdummy = f1.evaluate(np.zeros((1, dim)))    
    xopt1 = f1.xopt # formerly: `f1.arrxopt[0]` but did not work for all functions
    f_xopt1 = [f1opt, f2.evaluate(xopt1)]
    
    fdummy = f2.evaluate(np.zeros((1, dim)))
    xopt2 = f2.xopt # formerly: `f2.arrxopt[0]` but did not work for all functions
    f_xopt2 = [f1.evaluate(xopt2), f2opt]
    
    nadir = np.array([f1.evaluate(xopt2), f2.evaluate(xopt1)])
    ideal = np.array([f1opt, f2opt])
    
    # evaluate points along random directions through single optima:
    #rand_dir_1 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))
    rand_dir_1 = np.array([-2.57577836,  3.03082186, -1.33275642, -0.6939155 ,  0.99631351,
           -0.05842807,  1.99304198,  0.38531151,  1.3697517 ,  0.37240766,
            0.69762214, -0.79613309, -1.45320324, -0.97296174,  0.90871269,
           -1.00793426, -1.29250002,  0.25110439,  0.26014748, -0.1267351 ,
            0.63039621,  0.38236451,  1.07914151,  1.07130862,  0.13733215,
            1.97801217,  0.48601757,  2.3606844 ,  0.30784962, -0.36040267,
            0.68263725, -1.55353407, -0.57503424,  0.07362256,  0.95114969,
            0.43087735, -1.57600655,  0.48304268, -0.88184912,  1.85066177])[0:dim]
    rand_dir_1 = rand_dir_1/np.linalg.norm(rand_dir_1)
    #rand_dir_2 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))
    rand_dir_2 = np.array([0.2493309 , -2.05353785, -1.08038135, -0.06152298, -0.37996052,
           -0.65976313, -0.11217795, -1.41055602,  0.20321651, -1.42727459,
           -0.09742259, -0.26135753, -0.20899801,  0.85056449, -0.58492263,
           -0.93028813, -0.6576416 , -0.02854442, -0.53294699, -0.40898327,
           -0.64209791,  0.62349299, -0.44248805,  0.60715229,  0.97420653,
           -0.40989115,  0.67065727,  0.23491168, -0.0607614 , -0.42400703,
           -1.77536414,  1.92731362,  2.38098092, -0.23789751, -0.02411066,
           -0.37445709,  0.43547281,  0.32148583, -0.4257802 ,  0.15550121])[0:dim]
    rand_dir_2 = rand_dir_2/np.linalg.norm(rand_dir_2)
    rand_dir_3 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))
#    rand_dir_3 = np.array([0.27274996,  0.09450028,  0.23123471, -0.17268026, -0.19352246,
#            0.11116155,  1.91171592, -0.77188094,  0.50033182, -2.93726319,
#           -0.0444466 , -0.83483599, -1.05971685,  0.35220208,  0.67446614,
#           -0.66144976,  0.15873096,  0.63002013, -0.75455445,  0.11553671,
#            0.53268058, -0.17107212, -2.68158842,  1.76162118, -1.10528215,
#           -1.3174873 , -0.56827552,  0.8938743 , -1.40129273,  1.24724136,
#            0.32995442,  1.64754152, -0.23038488, -0.1996612 ,  0.7423728 ,
#            0.41590582, -0.49735973, -0.16317831,  0.14116915,  0.33144299])[0:dim]
#    rand_dir_3 = rand_dir_3/np.linalg.norm(rand_dir_3)    
    rand_dir_4 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))
#    rand_dir_4 = np.array([-1.64810074,  0.06035188, -1.08343971,  0.69871916, -1.57870908,
#            -0.39555544,  1.15952858,  0.82573846, -1.00821565,  0.46347426,
#            0.46817715, -0.70617468, -0.56754204, -1.77903594, -0.15184591,
#            2.10968445,  0.53652335, -0.03221351, -0.34664564,  1.69246492,
#            1.26043695,  0.20284844,  1.90425762, -0.43203046,  0.33297092,
#           -0.43151518, -0.27561938, -0.64456918, -1.52515793,  0.16840333,
#           -1.44740417, -0.07328904, -0.74026773,  0.02869038, -0.65416703,
#            0.55212071, -1.13507935, -1.18781606,  0.42888208, -1.47626463])[0:dim]
    rand_dir_4 = rand_dir_4/np.linalg.norm(rand_dir_4)
        
    
    # now sample two random points
    # rand_x_1 = -4+8*np.random.rand(dim)
    rand_x_1 = np.array([-2.70496645, -0.39106794, -2.80086174, -3.66756864,  2.14644397,
        2.78153367,  1.56329668,  2.35839362,  0.13302063, -2.91032329,
       -2.51556623, -2.35077186,  2.58377453,  1.17508714, -2.4457919 ,
        1.45033066, -1.23112017, -2.25318184,  2.41933833, -1.14164988,
       -2.36275527, -3.25853312, -2.4609917 ,  3.48296483, -2.68189074,
       -2.05345914, -2.4116529 ,  3.08138791, -2.23247829,  2.54796847,
       -0.936912  ,  3.35564688,  0.51737322, -0.92592536,  1.65481046,
       -2.52985307,  3.7431933 , -3.6630677 , -0.40448911,  1.33128767])[0:dim]    
    # rand_x_2 = -4+8*np.random.rand(dim)
    rand_x_2 = np.array([1.57461786, -3.44804825, -3.81020969,  2.83971589,  3.27253056,
       -3.26623201,  3.79526151,  1.76316424,  1.79345621, -0.81215354,
        2.06356913,  1.02657347,  2.99781081,  0.35872047,  3.69835244,
       -1.68708122,  1.84948801, -0.86589091, -1.61500454, -1.03210602,
        3.96363037, -1.30389274,  2.16486049, -2.77809263, -2.78117177,
       -0.89747482,  3.85189385,  2.34298403,  1.45079637,  3.78130948,
        2.55578938,  2.23402556,  0.79451819,  0.30563072,  1.91404655,
        0.37739932, -2.07692776, -0.06961333, -2.73583526, -2.70524468])[0:dim]    
    
    # Construct solutions along rand_dir_1 through xopt1
    # ------------------------------------------------------
    xgrid_opt_1 = np.tile(xopt1, (ngrid, 1))
    xgrid_opt_1 = np.array(xgrid_opt_1 + np.dot(t.reshape(ngrid,1), np.array([rand_dir_1])))
    
    # Construct solutions along coordinate axes through xopt1
    # -------------------------------------------------------
    xgrid_opt_1_along_axes = []
    for k in range(dim):
        xgrid_along_axis = np.tile(xopt1, (ngrid, 1))
        x_dir = np.zeros(dim)
        x_dir[k] = 1
        xgrid_along_axis = xgrid_along_axis + np.dot(t.reshape(ngrid,1), np.array([x_dir]))
        xgrid_opt_1_along_axes.append(xgrid_along_axis)
    xgrid_opt_1_along_axes = np.array(xgrid_opt_1_along_axes)
    
    # Construct solutions along rand_dir_2 through xopt2
    # ------------------------------------------------------
    xgrid_opt_2 = np.tile(xopt2, (ngrid, 1))
    xgrid_opt_2 = np.array(xgrid_opt_2 + np.dot(t.reshape(ngrid,1), np.array([rand_dir_2])))
    
    # Construct solutions along coordinate axes through xopt1
    # -------------------------------------------------------
    xgrid_opt_2_along_axes = []
    for k in range(dim):
        xgrid_along_axis = np.tile(xopt2, (ngrid, 1))
        x_dir = np.zeros(dim)
        x_dir[k] = 1
        xgrid_along_axis = xgrid_along_axis + np.dot(t.reshape(ngrid,1), np.array([x_dir]))
        xgrid_opt_2_along_axes.append(xgrid_along_axis)
    xgrid_opt_2_along_axes = np.array(xgrid_opt_2_along_axes)
        
    
    # Construct solutions along line through xopt1 and xopt2
    # ------------------------------------------------------
    xgrid_12 = np.tile((xopt1+xopt2)/2, (ngrid, 1))
    xgrid_12 = np.array(xgrid_12 + np.dot(t.reshape(ngrid,1),
                        np.array([xopt2-xopt1])/np.linalg.norm([xopt2-xopt1])
                        )
               )
               
    # Construct solutions along a fully random line
    # ------------------------------------------------------
    xgrid_rand_1 = np.tile(rand_x_1, (ngrid, 1))
    xgrid_rand_1 = np.array(xgrid_rand_1
                   + np.dot(t.reshape(ngrid,1), np.array([rand_dir_3])))

    # and for another fully random line
    # ------------------------------------------------------
    xgrid_rand_2 = np.tile(rand_x_2, (ngrid, 1))
    xgrid_rand_2 = np.array(xgrid_rand_2
                   + np.dot(t.reshape(ngrid,1), np.array([rand_dir_4])))
    
    
    # Evaluate the grid for each direction
    # -------------------------------------------
    fgrid_opt_1 = [f1.evaluate(xgrid_opt_1), f2.evaluate(xgrid_opt_1)]
    fgrid_opt_2 = [f1.evaluate(xgrid_opt_2), f2.evaluate(xgrid_opt_2)]
    fgrid_12 = [f1.evaluate(xgrid_12), f2.evaluate(xgrid_12)]
    fgrid_rand_1 = [f1.evaluate(xgrid_rand_1), f2.evaluate(xgrid_rand_1)]
    fgrid_rand_2 = [f1.evaluate(xgrid_rand_2), f2.evaluate(xgrid_rand_2)]
    fgrid_opt_1_along_axes = []
    for k in range(dim):    
        fgrid_opt_1_along_axes.append([f1.evaluate(xgrid_opt_1_along_axes[k]),
                                       f2.evaluate(xgrid_opt_1_along_axes[k])])                               
    fgrid_opt_2_along_axes = []
    for k in range(dim):    
        fgrid_opt_2_along_axes.append([f1.evaluate(xgrid_opt_2_along_axes[k]),
                                       f2.evaluate(xgrid_opt_2_along_axes[k])])                               
    
    # plot reference sets if available:
    if inputfolder:
        filename = "bbob-biobj_f%02d_i%02d_d%02d_nondominated.adat" % (f_id, inst_id, dim)
        try:
            A = np.array(np.loadtxt(inputfolder + filename, comments='%', usecols = (1,2)))
        except:
            print("Problem opening %s" % (inputfolder + filename))
            e = sys.exc_info()[0]
            print("   Error: %s" % e)

        
        if downsample:
            # normalize A wrt ideal and nadir (and take care of having no inf
            # in data by adding the constant 1e-15 before the log10):
            B = (A-ideal) / (nadir-ideal)
            Blog = np.log10((A-ideal) / (nadir-ideal) + 1e-15)
            # cut precision to downsample:
            decimals=3
            B = np.around(B, decimals=decimals)
            Blog = np.around(Blog, decimals=decimals)
            
            if 11<3: # filter out dominated points (and doubles)
                pfFlag = pf.callParetoFront(B)
                pfFlaglog = pf.callParetoFront(Blog)
            else: # filter out all but one point per grid cell
                pfFlag = np.array([False] * len(B), dtype=bool)
                # check corner case first:
                if not (B[2][0] == B[0][0] and B[2][1] == B[0][1]):
                    pfFlag[2] = True
                else:
                    B[2] = B[0]
                for i in range(3,len(B)):
                    if not (B[i][0] == B[i-1][0] and B[i][1] == B[i-1][1]):
                        pfFlag[i] = True

                pfFlaglog = np.array([False] * len(Blog), dtype=bool)
                # check corner case first:
                if not (Blog[2][0] == Blog[0][0] and Blog[2][1] == Blog[0][1]):
                    pfFlaglog[2] = True
                else:
                    Blog[2] = Blog[0]
                for i in range(3,len(Blog)):
                    if not (Blog[i][0] == Blog[i-1][0] and Blog[i][1] == Blog[i-1][1]):
                        pfFlaglog[i] = True

            # ensure that both extremes are still in, assuming they are stored in the beginning:
            pfFlag[0] = True
            pfFlaglog[0] = True
            pfFlag[1] = True
            pfFlaglog[1] = True            
            Alog = A[pfFlaglog]
            A = A[pfFlag]
            # finally sort wrt f_1 axis:
            Alog = Alog[Alog[:,0].argsort(kind='mergesort')]
            A = A[A[:,0].argsort(kind='mergesort')]
            

        # normalized plot, such that ideal and nadir are mapped to
        # 0 and 1 respectively; add 1e-15 for numerical reasons (to not have
        # inf in the data to plot)
        plt.loglog((Alog[:,0] - ideal[0])/(nadir[0]-ideal[0]) + 1e-15,
                   (Alog[:,1] - ideal[1])/(nadir[1]-ideal[1]) + 1e-15,
                   '.k', markersize=8)
        
    # plot actual solutions along directions:
    numticks = 5
    nf = nadir-ideal # normalization factor used very often now
    for k in range(dim):    
        p6, = ax.loglog(((fgrid_opt_1_along_axes[k])[0]-f1opt)/nf[0],
                        ((fgrid_opt_1_along_axes[k])[1]-f2opt)/nf[1],
                        color=myc[1], ls=myls[0], lw=1, alpha=0.3)
    for k in range(dim):    
        p7, = ax.loglog(((fgrid_opt_2_along_axes[k])[0]-f1opt)/nf[0],
                        ((fgrid_opt_2_along_axes[k])[1]-f2opt)/nf[1],
                        color=myc[1], ls=myls[0], lw=1, alpha=0.3)
            
    p1, = ax.loglog((fgrid_opt_1[0]-f1opt)/nf[0], (fgrid_opt_1[1]-f2opt)/nf[1], color=myc[1], ls=myls[2],
                    label=r'cuts through single optima', **mylw)
    p2, = ax.loglog((fgrid_opt_2[0]-f1opt)/nf[0], (fgrid_opt_2[1]-f2opt)/nf[1], color=myc[1], ls=myls[2],
                    **mylw)
    p3, = ax.loglog((fgrid_12[0]-f1opt)/nf[0], (fgrid_12[1]-f2opt)/nf[1],
                    color=myc[2], ls=myls[2],
                    label=r'cut through both optima', **mylw)
    p4, = ax.loglog((fgrid_rand_1[0]-f1opt)/nf[0], (fgrid_rand_1[1]-f2opt)/nf[1],
                    color=myc[3], ls=myls[2],
                    label=r'two random directions', **mylw)
    p5, = ax.loglog((fgrid_rand_2[0]-f1opt)/nf[0], (fgrid_rand_2[1]-f2opt)/nf[1],
                    color=myc[3], ls=myls[2], **mylw)
        
    

    # print 'ticks' along the axes in equidistant t space:
    numticks = 11
    plot_ticks([fgrid_opt_1[0], fgrid_opt_1[1]], numticks, nadir, ideal, ax, mylw, myc[1], logscale=True)
    plot_ticks([fgrid_opt_2[0], fgrid_opt_2[1]], numticks, nadir, ideal, ax, mylw, myc[1], logscale=True)
    plot_ticks([fgrid_12[0], fgrid_12[1]], numticks, nadir, ideal, ax, mylw, myc[2], logscale=True)
    plot_ticks([fgrid_rand_1[0], fgrid_rand_1[1]], numticks, nadir, ideal, ax, mylw, myc[3], logscale=True)
    plot_ticks([fgrid_rand_2[0], fgrid_rand_2[1]], numticks, nadir, ideal, ax, mylw, myc[3], logscale=True)
    
    # Get Pareto front from vectors of objective values obtained
    objs = np.vstack((fgrid_opt_1[0], fgrid_opt_1[1])).transpose()
    pfFlag_opt_1 = pf.callParetoFront(objs)
    ax.loglog((fgrid_opt_1[0][pfFlag_opt_1]-f1opt)/nf[0],
              (fgrid_opt_1[1][pfFlag_opt_1]-f2opt)/nf[1],
              color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0,
              alpha=0.4)
    objs = np.vstack((fgrid_opt_2[0], fgrid_opt_2[1])).transpose()
    pfFlag_opt_2 = pf.callParetoFront(objs)
    ax.loglog((fgrid_opt_2[0][pfFlag_opt_2]-f1opt)/nf[0],
              (fgrid_opt_2[1][pfFlag_opt_2]-f2opt)/nf[1],
              color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0,
              alpha=0.4)
    objs = np.vstack((fgrid_12[0], fgrid_12[1])).transpose()
    pfFlag_12 = pf.callParetoFront(objs)
    ax.loglog((fgrid_12[0][pfFlag_12]-f1opt)/nf[0],
              (fgrid_12[1][pfFlag_12]-f2opt)/nf[1],
              color=myc[2], ls='', marker='.', markersize=8, markeredgewidth=0,
              alpha=0.4)
    objs = np.vstack((fgrid_rand_1[0], fgrid_rand_1[1])).transpose()
    pfFlag_rand_1 = pf.callParetoFront(objs)
    ax.loglog((fgrid_rand_1[0][pfFlag_rand_1]-f1opt)/nf[0],
              (fgrid_rand_1[1][pfFlag_rand_1]-f2opt)/nf[1],
              color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0,
              alpha=0.4)
    objs = np.vstack((fgrid_rand_2[0], fgrid_rand_2[1])).transpose()
    pfFlag_rand_2 = pf.callParetoFront(objs)
    ax.loglog((fgrid_rand_2[0][pfFlag_rand_2]-f1opt)/nf[0],
              (fgrid_rand_2[1][pfFlag_rand_2]-f2opt)/nf[1],
              color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0,
              alpha=0.4)
    
    
    # plot nadir:
    ax.loglog((nadir[0]-f1opt)/nf[0], (nadir[1]-f2opt)/nf[1],
              color='k', ls='', marker='+', markersize=9, markeredgewidth=1.5,
              alpha=0.9)
    
    
    # beautify:
    ax.set_xlabel(r'$f_1 - f_1^\mathsf{opt}$ (normalized)', fontsize=16)
    ax.set_ylabel(r'$f_2 - f_2^\mathsf{opt}$ (normalized)', fontsize=16)
    ax.legend(loc="best", framealpha=0.2)
    ax.set_title("bbob-biobj $f_{%d}$ along linear search space directions (%d-D, instance %d)" % (f_id, dim, inst_id))
    [line.set_zorder(3) for line in ax.lines]
    [line.set_zorder(3) for line in ax.lines]
    fig.subplots_adjust(left=0.1) # more room for the y-axis label
    
    # we might want to zoom in a bit:
    ax.set_xlim((1e-3, plt.xlim()[1]))
    ax.set_ylim((1e-3, plt.ylim()[1]))
    #    ax.set_ylim((0, 2*(nadir[1] - f2opt)))
    
    # add rectangle as ROI
    ax.add_patch(patches.Rectangle(
            ((ideal[0]-f1opt)/nf[0] + 1e-16, (ideal[1]-f2opt)/nf[1] + 1e-16),
             (nadir[0]-ideal[0])/nf[0], (nadir[1]-ideal[1])/nf[1],
             alpha=0.05,
             color='k'))
    
    if tofile:
        if not os.path.exists(outputfolder):
            os.makedirs(outputfolder)
        filename = outputfolder + "directions-f%02d-i%02d-d%02d-logobjspace" % (f_id, inst_id, dim)
        saveFigure(filename, verbose=True)
    else:   
        plt.show(block=True)
        
    plt.close()    
    
    
    
    
    
    ##############################################################
    #                                                            #
    # Plot the same, but not in log-scale.                       #
    #                                                            #
    ##############################################################
    
    fig = plt.figure(2)
    ax = fig.add_subplot(111)
    
    # plot reference sets if available:
    if inputfolder:
        plt.plot(A[:,0], A[:,1], '.k', markersize=8)
    
    
    for k in range(dim):    
        p6, = ax.plot((fgrid_opt_1_along_axes[k])[0],
                      (fgrid_opt_1_along_axes[k])[1],
                      color=myc[1], ls=myls[0], lw=1, alpha=0.3)
    for k in range(dim):    
        p7, = ax.plot((fgrid_opt_2_along_axes[k])[0],
                      (fgrid_opt_2_along_axes[k])[1],
                      color=myc[1], ls=myls[0], lw=1, alpha=0.3)    
    p1, = ax.plot(fgrid_opt_1[0], fgrid_opt_1[1], color=myc[1], ls=myls[2],
                    label=r'cuts through single optima', **mylw)
    
    p2, = ax.plot(fgrid_opt_2[0], fgrid_opt_2[1], color=myc[1], ls=myls[2],
                    **mylw)
    
    p3, = ax.plot(fgrid_12[0], fgrid_12[1], color=myc[2], ls=myls[2],
                    label=r'cut through both optima', **mylw)
    
    p4, = ax.plot(fgrid_rand_1[0], fgrid_rand_1[1], color=myc[3], ls=myls[2],
                    label=r'two random directions', **mylw)
    
    p4, = ax.plot(fgrid_rand_2[0], fgrid_rand_2[1], color=myc[3], ls=myls[2],
                    **mylw)
        
    # plot a few ticks along directions, equi-distant in search space:
    numticks = 11
    plot_ticks(fgrid_opt_1, numticks, nadir, ideal, ax, mylw, 'b')
    plot_ticks(fgrid_opt_2, numticks, nadir, ideal, ax, mylw, 'b')
    plot_ticks(fgrid_12, numticks, nadir, ideal, ax, mylw, 'r')
    plot_ticks(fgrid_rand_1, numticks, nadir, ideal, ax, mylw, 'y')
    plot_ticks(fgrid_rand_2, numticks, nadir, ideal, ax, mylw, 'y')

    
    
    # plot non-dominated points
    ax.plot(fgrid_opt_1[0][pfFlag_opt_1], fgrid_opt_1[1][pfFlag_opt_1], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(fgrid_opt_2[0][pfFlag_opt_2], fgrid_opt_2[1][pfFlag_opt_2], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(fgrid_12[0][pfFlag_12], fgrid_12[1][pfFlag_12], color=myc[2], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(fgrid_rand_1[0][pfFlag_rand_1], fgrid_rand_1[1][pfFlag_rand_1], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(fgrid_rand_2[0][pfFlag_rand_2], fgrid_rand_2[1][pfFlag_rand_2], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
        

    # plot nadir:
    ax.plot(nadir[0], nadir[1], color='k', ls='', marker='+', markersize=9, markeredgewidth=1.5,
                                 alpha=0.9)
    # plot ideal:
    ax.plot(ideal[0], ideal[1], color='k', ls='', marker='x', markersize=8, markeredgewidth=1.5,
                                 alpha=0.9)

    # plot extremes    
    ax.plot(f_xopt1[0], f_xopt1[1], color='blue', ls='', marker='*', markersize=8, markeredgewidth=0.5, markeredgecolor='black')
    ax.plot(f_xopt2[0], f_xopt2[1], color='blue', ls='', marker='*', markersize=8, markeredgewidth=0.5, markeredgecolor='black')
    
    
    # beautify:
    ax.set_xlabel(r'first objective', fontsize=16)
    ax.set_ylabel(r'second objective', fontsize=16)
    ax.legend(loc="best", framealpha=0.2)
    ax.set_title("bbob-biobj $f_{%d}$ along linear search space directions (%d-D, instance %d)" % (f_id, dim, inst_id))    
    [line.set_zorder(3) for line in ax.lines]
    [line.set_zorder(3) for line in ax.lines]
    fig.subplots_adjust(left=0.1) # more room for the y-axis label
    
    # zoom into Pareto front:
    ax.set_xlim((ideal[0]-0.05*(nadir[0] - ideal[0]), nadir[0] + (nadir[0] - ideal[0])))
    ax.set_ylim([ideal[1]-0.05*(nadir[1] - ideal[1]), nadir[1] + (nadir[1] - ideal[1])])
    
    # add rectangle as ROI
    ax.add_patch(patches.Rectangle(
            (ideal[0], ideal[1]), nadir[0]-ideal[0], nadir[1]-ideal[1],
            alpha=0.05,
            color='k'))
    
    if tofile:
        if not os.path.exists(outputfolder):
            os.makedirs(outputfolder)
        filename = outputfolder + "directions-f%02d-i%02d-d%02d-objspace" % (f_id, inst_id, dim)
        saveFigure(filename, verbose=True)
    else:        
        plt.show(block=True)
    
    plt.close()
    
    
    ##############################################################
    #                                                            #
    # Finally, the corresponding plots in search space, i.e.     #
    # projections of it onto the variables x_1 and x_(dim-1)     #
    # (or x1, x2 in the case of not enough variables).           #
    #                                                            #
    ##############################################################
    fig = plt.figure(3)
    ax = fig.add_subplot(111)
    
    # plot reference sets if available:
    #if inputfolder:
    #    plt.plot(A[:,0], A[:,1], '.k', markersize=8)
    
    ax.set_xlabel(r'$x_1$', fontsize=16)
    # fix second variable in addition to x_1:
    if dim > 2:
        second_variable = -2
        ax.set_ylabel(r'$x_{%d}$' % (dim-1), fontsize=16)
    else:
        second_variable = 1
        ax.set_ylabel(r'$x_{%d}$' % dim, fontsize=16)
    
    # read and plot best Pareto set approximation
    if inputfolder:
        filename = "bbob-biobj_f%02d_i%02d_d%02d_nondominated.adat" % (f_id, inst_id, dim)
        C = []
        with open(inputfolder + filename) as f:
            for line in f:
                splitline = line.split()
                if len(splitline) == (dim + 3):  # has line x-values?
                    C.append(np.array(splitline[3:], dtype=np.float))
        C = np.array(C)
        C = C[C[:, second_variable].argsort(kind='mergesort')] # sort wrt x_{second_variable} first
        C = C[C[:, 0].argsort(kind='mergesort')] # now wrt x_1 to finally get a stable sort
        pareto_set_approx_size = C.shape[0]

        # filter out all but one point per grid cell in the 
        # (x_1, x_{second_variable}) space
        if downsample:
            decimals=2
            X = np.around(C, decimals=decimals)
            # sort wrt x_{second_variable} first
            idx_1 = X[:, second_variable].argsort(kind='mergesort')
            X = X[idx_1] 
            # now wrt x_1 to finally get a stable sort
            idx_2 = X[:, 0].argsort(kind='mergesort')
            X = X[idx_2]
            xflag = np.array([False] * len(X), dtype=bool)
            xflag[0] = True # always take the first point
            for i in range(1, len(X)):
                if not (X[i,0] == X[i-1,0] and
                        X[i,second_variable] == X[i-1, second_variable]):
                    xflag[i] = True
            X = ((C[idx_1])[idx_2])[xflag]

        pareto_set_sample_size = X.shape[0]
        
        paretosetlabel = ('reference set (%d of %d points)' %
                          (pareto_set_sample_size, pareto_set_approx_size))
        plt.plot(X[:, 0], X[:, second_variable], '.k', markersize=8,
                 label=paretosetlabel)
    # end of reading in and plotting best Pareto set approximation

    for k in range(dim):    
        p6, = ax.plot(xgrid_opt_1_along_axes[k][:, 0],
                      xgrid_opt_1_along_axes[k][:, second_variable],
                      color=myc[1], ls=myls[0], lw=1, alpha=0.3)
    for k in range(dim):
        p7, = ax.plot(xgrid_opt_2_along_axes[k][:, 0],
                      xgrid_opt_2_along_axes[k][:, second_variable],
                      color=myc[1], ls=myls[0], lw=1, alpha=0.3)

    p1, = ax.plot(xgrid_opt_1[:, 0], xgrid_opt_1[:, second_variable], color=myc[1], ls=myls[2],
                    label=r'cuts through single optima', **mylw)

    p2, = ax.plot(xgrid_opt_2[:, 0], xgrid_opt_2[:, second_variable], color=myc[1], ls=myls[2],
                    **mylw)

    p3, = ax.plot(xgrid_12[:, 0], xgrid_12[:, second_variable], color=myc[2], ls=myls[2],
                    label=r'cut through both optima', **mylw)

    p4, = ax.plot(xgrid_rand_1[:, 0], xgrid_rand_1[:, second_variable], color=myc[3], ls=myls[2],
                    label=r'two random directions', **mylw)

    p5, = ax.plot(xgrid_rand_2[:, 0], xgrid_rand_2[:, second_variable], color=myc[3], ls=myls[2],
                    **mylw)

    # plot non-dominated points
    ax.plot(xgrid_opt_1[pfFlag_opt_1, 0], xgrid_opt_1[pfFlag_opt_1, second_variable], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(xgrid_opt_2[pfFlag_opt_2, 0], xgrid_opt_2[pfFlag_opt_2, second_variable], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(xgrid_12[pfFlag_12, 0], xgrid_12[pfFlag_12, second_variable], color=myc[2], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(xgrid_rand_1[pfFlag_rand_1, 0], xgrid_rand_1[pfFlag_rand_1, second_variable], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)
    ax.plot(xgrid_rand_2[pfFlag_rand_2, 0], xgrid_rand_2[pfFlag_rand_2, second_variable], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0,
                                 alpha=0.4)


    # highlight the region [-5,5]
    ax.add_patch(patches.Rectangle(
            (-5, -5), 10, 10,
            alpha=0.05,
            color='k'))
    
    # beautify
    ax.set_xlim([-6, 6])
    ax.set_ylim([-6, 6])
    if dim == 2:
        ax.set_title("decision space of bbob-biobj $f_{%d}$ (%d-D, instance %d)" % (f_id, dim, inst_id))    
    else:
        ax.set_title("projection of decision space for bbob-biobj $f_{%d}$ (%d-D, instance %d)" % (f_id, dim, inst_id))    
    ax.legend(loc="best", framealpha=0.2, numpoints=1)
    fig.subplots_adjust(left=0.1) # more room for the y-axis label    
    
    # printing
    if tofile:
        if not os.path.exists(outputfolder):
            os.makedirs(outputfolder)
        filename = outputfolder + "directions-f%02d-i%02d-d%02d-searchspace" % (f_id, inst_id, dim)
        saveFigure(filename, verbose=True)
    else:        
        plt.show(block=True)
    
    plt.close()
Beispiel #14
0
        # --- experiment configuration, parsed from the command line -------
        # NOTE(review): this fragment sits inside an enclosing scope that is
        # not visible here; `sys`, `fgeneric`, `bn`, `logging` and
        # `script_name` are presumably provided by that scope -- confirm
        # against the full file.
        BSF_Epoch = []  # collects best-so-far values, presumably one per epoch

        FId = int(sys.argv[1])  # benchmark function id (passed to bn.instantiate)
        D = int(sys.argv[2])  # Number of Dimensions
        F_flag = sys.argv[3]  # mutation-factor flag: Cte, Scalar or Vector
        ms_type = sys.argv[4]  # mutation-scheme type: population, static, individual
        NP = int(sys.argv[5])  # Population Number (e.g. 20)
        ms_indx = sys.argv[6]  # index of mutation scheme (or the string 'null')
        Bound = int(sys.argv[7])  # symmetric box bound of the search space
        Cr = 0.5  # CrossOver Rate
        VTR = 1e-8  # Value to Reach
        N_Epoch = 30  # Number of Epochs
        NFC = 10000 * D  # Number of Function Calls (budget scales with dimension)
        LB = -Bound  # Lower bound
        UB = Bound  # Upper bound
        # BBOB logging wrapper around the freshly instantiated benchmark function
        f_gen = fgeneric.LoggingFunction('tmp').setfun(*bn.instantiate(FId))
        OGV = f_gen.ftarget  # Optimal Global Value to Reach
        # one-line summary of the run configuration, written to the log below
        readme_log = 'D:' + str(D) + ' NP:' + str(NP) + ' NFC:' + str(NFC) + \
                     ' MF:' + F_flag + ' MST:' + ms_type + ' MSI:' + str(ms_indx) + \
                     ' Limit:' + str(Bound) + 'N_Epoch:' + str(N_Epoch) + ' Cr:' + str(Cr) + ' VTR:' + str(VTR)

        # log into '<script name>.log' (script_name is expected to end in '.py')
        logging.basicConfig(filename=script_name[:-3] + '.log',
                            level=logging.INFO)
        logging.info(readme_log)

        # available differential-evolution mutation schemes
        ms_list = ['rand1', 'best1', 'tbest1', 'best2', 'rand2']

        # 'null' selects no specific scheme; otherwise interpret as an index into ms_list
        if ms_indx == 'null':
            ms_indx = 'null'
        else:
            ms_indx = int(ms_indx)
Beispiel #15
0
def DataLogAllFun(dimNum):
    """Attach and log every BBOB function (ids 1..24) in dimension `dimNum`.

    NOTE(review): this body looks like two snippets merged together --
    `fbest`, `ftarget`, `alg` and `xbest` are not defined anywhere in view,
    and the trailing `return xbest` does not match the logging purpose the
    name suggests. Confirm against the original source before relying on it.
    """
    for fid in range(1, 25):
        # bind BBOB function `fid` (instance 1) to the module-level logger `f`
        f.setfun(*bbobbenchmarks.instantiate(fid, 1))
        DataLog(fid, dimNum)
        if fbest < ftarget:  # task achieved
            break

        # compute the next population
        alg.run(1)

    return xbest

# --- BBOB benchmarking main loop (script level, Python 2: uses xrange) ----
# NOTE(review): this snippet is truncated -- the final print call is cut off
# mid-statement, and `datapath`, `opts`, `dimensions`, `function_ids`,
# `instances`, `maxrestarts`, `minfunevals`, `maxfunevals` and
# `run_optimizer` are defined outside the visible region.
t0 = time.time()
np.random.seed(int(t0))  # seed once per script run, from wall-clock time

f = fgeneric.LoggingFunction(datapath, **opts)
for dim in dimensions:  # small dimensions first, for CPU reasons
    for fun_id in function_ids:
        for iinstance in instances:
            f.setfun(*bbobbenchmarks.instantiate(fun_id, iinstance=iinstance))

            # independent restarts until maxfunevals or ftarget is reached
            for restarts in xrange(maxrestarts + 1):
                if restarts > 0:
                    f.restart('independent restart')  # additional info
                # maxfunevals/minfunevals appear to be strings evaluated with
                # `dim` in scope -- TODO confirm against their definitions
                run_optimizer(f.evalfun, dim,  eval(maxfunevals) - f.evaluations,
                              f.ftarget)
                if (f.fbest < f.ftarget
                    or f.evaluations + eval(minfunevals) > eval(maxfunevals)):
                    break

            f.finalizerun()

            print('  f%d in %d-D, instance %d: FEs=%d with %d restarts, '
                  'fbest-ftarget=%.4e, elapsed time [h]: %.2f'
Beispiel #17
0
        # --- experiment configuration, parsed from the command line -------
        # NOTE(review): near-duplicate of an earlier snippet in this file; the
        # enclosing scope (providing `sys`, `fgeneric`, `bn`, `logging`,
        # `script_name`) is not visible here -- confirm against the full file.
        BSF_Epoch = []  # collects best-so-far values, presumably one per epoch

        FId = int(sys.argv[1])  # benchmark function id (passed to bn.instantiate)
        D = int(sys.argv[2])  # Number of Dimensions
        F_flag = sys.argv[3]  # mutation-factor flag: Cte, Scalar or Vector
        ms_type = sys.argv[4]  # mutation-scheme type: population, static, individual
        NP = int(sys.argv[5]) # Population Number (e.g. 20)
        ms_indx = sys.argv[6]  # index of mutation scheme (or the string 'null')
        Bound = int(sys.argv[7])  # symmetric box bound of the search space
        Cr = 0.5  # CrossOver Rate
        VTR = 1e-8  # Value to Reach
        N_Epoch = 30 # Number of Epochs
        NFC = 10000*D  # Number of Function Calls (budget scales with dimension)
        LB = -Bound  # Lower bound
        UB = Bound  # Upper bound
        # BBOB logging wrapper around the freshly instantiated benchmark function
        f_gen = fgeneric.LoggingFunction('tmp').setfun(*bn.instantiate(FId))
        OGV = f_gen.ftarget  # Optimal Global Value to Reach
        # one-line summary of the run configuration, written to the log below
        readme_log = 'D:' + str(D) + ' NP:' + str(NP) + ' NFC:' + str(NFC) + \
                     ' MF:' + F_flag + ' MST:' + ms_type + ' MSI:' + str(ms_indx) + \
                     ' Limit:' + str(Bound) + 'N_Epoch:' + str(N_Epoch) + ' Cr:' + str(Cr) + ' VTR:' + str(VTR)

        # log into '<script name>.log' (script_name is expected to end in '.py')
        logging.basicConfig(filename=script_name[:-3]+'.log', level=logging.INFO)
        logging.info(readme_log)

        # available differential-evolution mutation schemes
        ms_list = ['rand1', 'best1', 'tbest1', 'best2', 'rand2']

        # 'null' selects no specific scheme; otherwise interpret as an index into ms_list
        if ms_indx == 'null':
            ms_indx = 'null'
        else:
            ms_indx = int(ms_indx)
Beispiel #18
0
# Compare each problem of the (externally defined) `suite` against the
# corresponding old-style benchmark instance from `bm` at one random search
# point, tracking how often and by how much the two implementations disagree.
largestAbsDifference = 0
largestRelDifference = 0
numberOfDifferences = 0  # fix: was never initialized -> NameError on first difference
numberOfProblems = 0
numberOfSearchPoints = 0

for problem_index, problem in enumerate(suite):

    # problem ids look like '..._f<fid>_..._i<iid>_..._d<dim>_...'
    f = int(problem.id.lower().split('_f')[1].split('_')[0])
    d = int(problem.id.lower().split('_d')[1].split('_')[0])
    i = int(problem.id.lower().split('_i')[1].split('_')[0])
    numberOfProblems = numberOfProblems + 1

    # one uniformly random point in [-4, 4]^d
    xrand = -4 + 8 * np.random.rand(d)
    numberOfSearchPoints = numberOfSearchPoints + 1

    fun, fopt = bm.instantiate(f, iinstance=i)
    fold = fun.evaluate(xrand)  # old implementation

    fnew = problem(xrand)  # new implementation

    if (fnew - fold > 1e-10 or fnew - fold < -1e-10):
        print(problem.id + ":")
        print("%e, %e, %e" % (fnew - fold, fnew, fold))
        print('!!!!!!!!!!!!!!!!!!!')
        numberOfDifferences = numberOfDifferences + 1
        if abs(fnew - fold) > largestAbsDifference:
            largestAbsDifference = abs(fnew - fold)
        # fix: guard against ZeroDivisionError when one value is exactly 0
        denominator = min(abs(fnew), abs(fold))
        if denominator > 0 and abs(fnew - fold) / denominator > largestRelDifference:
            largestRelDifference = abs(fnew - fold) / denominator

print("---------------------------------------------------------")
Beispiel #19
0
def get_all_aRT_values_in_objective_space(f_id, dim, f1_id, f2_id,
                   inputfolder=None, logscale=True, downsample=True,
                   with_grid=False):
    """
    Returns a set of points in objective space and their corresponding
    aRT values for the specified algorithm data (on function `f_id` in
    dimension `dim`, given in the folder `inputfolder`). Data points
    produced after cropbudget function evaluations are not taken into account.

    The points in objective space are thereby either generated on a grid
    (if `with_grid == True` either in logscale or not) or constructed from the
    actual data points of the algorithm (TODO: not supported yet). Note that
    the returned points will be already sorted in order of their aRTs (in
    decreasing order).

    If `downsample == True`, the input data will be reduced by taking into
    account only one input point per objective space cell where the cells
    correspond to the objective vectors of the above mentioned grid.
    In any case, all points outside [0,maxplot] (and [precision, maxplot] in
    the logscale case) are removed. For later plotting of the input points, the
    already downsampled input points are also returned as a third argument
    (in a dictionary, giving for each entry the data points associated to
    the corresponding instance/run).

    Assumes that each instance is only contained once in the data.
    Relies on the module-level names `n`, `precision`, `maxplot`,
    `cropbudget`, `biobjinst`, `bm`, `sample_down` and `compute_aRT`.
    """

    # obtain the data of the algorithm run to display:
    filename = "bbob-biobj_f%02d_d%02d_nondom_all.adat" % (f_id, dim)
    #filename = "bbob-biobj_f%02d_d%02d_nondom_instance1.adat" % (f_id, dim)
    try:
        A = {}          # instance -> (possibly downsampled) data points
        instance = -1   # sentinel: no instance header seen yet
        B = []          # data points of the instance currently being read
        nadirs = {}     # instance -> nadir point (for normalization)
        ideals = {}     # instance -> ideal point (for normalization)
        # hoisted out of the per-line loop: `cropbudget` does not change
        # within one call, so evaluate it once
        budget = eval(cropbudget)
        if downsample:
            print('reading in data and downsampling them to %dx%d grid...' % (n, n))
        else:
            print('reading in data...')

        with open(inputfolder + filename) as f:
            for line in f:
                if "function eval_number" in line:
                    continue
                elif "evaluations =" in line:
                    continue
                elif "instance" in line:
                    # store first data of previous instance:
                    if instance not in A and not instance == -1:
                        # downsample, i.e., filter out all but one point per
                        # grid cell in the objective space
                        blen = len(B)
                        if downsample:
                            B = sample_down(B, n, logscale=logscale)
                        print("instance data points downsampled from %d to %d" % (blen, len(B)))

                        A[instance] = B

                    # reset instance and B:
                    instance = int((line.split()[3])[:-1])
                    B = []
                    # get ideal and nadir for this instance:
                    f1, f1opt = bm.instantiate(f1_id, iinstance=biobjinst[instance][0])
                    f2, f2opt = bm.instantiate(f2_id, iinstance=biobjinst[instance][1])
                    # dummy evaluations so the benchmark objects expose xopt
                    fdummy = f1.evaluate(np.zeros((1, dim)))
                    fdummy = f2.evaluate(np.zeros((1, dim)))
                    nadir = np.array([f1.evaluate(f2.xopt), f2.evaluate(f1.xopt)])
                    ideal = np.array([f1opt, f2opt])
                    nadirs[instance] = nadir
                    ideals[instance] = ideal
                else:
                    splitline = line.split()
                    # fix: np.float alias was removed from NumPy; the builtin
                    # float is what it always aliased
                    newline = np.array(splitline[:3], dtype=float)

                    if newline[0] <= budget:
                        # normalize objective vector:
                        newline[1] = (newline[1]-ideals[instance][0])/(nadirs[instance][0]-ideals[instance][0])
                        newline[2] = (newline[2]-ideals[instance][1])/(nadirs[instance][1]-ideals[instance][1])
                        # assume that all points are >0 for both objectives
                        # and remove all above `maxplot`:
                        if newline[1] <= maxplot and newline[2] <= maxplot:
                            B.append(newline)

            # store data of final instance:
            if instance not in A and not instance == -1:
                blen = len(B)
                if downsample:
                    B = sample_down(B, n, logscale=logscale)
                A[instance] = B
                print("instance data points downsampled from %d to %d" % (blen, len(B)))

            print("all %d instances read in" % len(A))


    # fix: narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit; best-effort behavior (report and fall
    # through with whatever was read so far) is kept
    except Exception:
        print("Problem opening %s" % (inputfolder + filename))
        e = sys.exc_info()[0]
        print("   Error: %s" % e)



    # construct grid in normalized objective (sub-)space [precision, maxplot]:
    if with_grid:
        if logscale:
            log_range = np.logspace(np.log10(precision), np.log10(maxplot), num=n, endpoint=True, base=10.0)
            gridpoints = np.array(list(product(log_range, log_range)))
        else:
            gridpoints = maxplot * np.array(list(product(range(n),range(n))))/(n-1)
    else:
        raise NotImplementedError # for the moment, the plotting is not
                                  # memory-efficient enough to handle even
                                  # small data sets
        # NOTE: everything below is unreachable until the raise above is lifted
        ticks = []
        for key in A:
            for a in A[key]:
                if a[1] not in ticks:
                    ticks.append(a[1])
                if a[2] not in ticks:
                    ticks.append(a[2])
        ticks.sort()
        print("producing set of potential %dx%d (irregular) grid points where aRTA plot can change" % (len(ticks), len(ticks)))
        gridpoints = np.array(list(product(ticks, ticks)))



    aRTs = compute_aRT(gridpoints, A)

    # sort gridpoints (and of course colors) wrt. their aRT:
    idx = aRTs.argsort(kind='mergesort')
    aRTs = aRTs[idx]
    gridpoints = gridpoints[idx]

    return gridpoints, aRTs, A