def main(dictAlg, dsref=None, order=None, targets=defaulttargets,
         outputdir='', info='default', verbose=True):
    """Generates image files of the performance profiles of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the performance profile (Moré:2008) on multiple functions
    for multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one per algorithm
    :param list targets: target function values
    :param list order: sorted list of keys to dictAlg for plotting order

    """
    for d, dictalgdim in dictAlg.dictAlgByDim().iteritems():
        plotmultiple(dictalgdim, dsref, targets)
        figureName = os.path.join(outputdir, 'ppperfprof_%02dD_%s' % (d, info))
        saveFigure(figureName, verbose=verbose)
        plt.close()
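# A minimal sketch of the grouping that dictAlg.dictAlgByDim() performs above:
# split an {algorithm: list of data sets} mapping by the data sets' dimension,
# so that one performance profile can be written per dimension. This is an
# illustration only; `group_by_dimension` and the assumed `.dim` attribute are
# stand-ins, not part of the bbob_pproc API.
import collections


def group_by_dimension(dict_alg):
    """Return {dim: {algorithm: [data sets of that dimension]}}."""
    by_dim = collections.defaultdict(dict)
    for alg, data_sets in dict_alg.items():
        for ds in data_sets:  # each ds is assumed to expose a .dim attribute
            by_dim[ds.dim].setdefault(alg, []).append(ds)
    return dict(by_dim)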
def main(dsList0, dsList1, dim, targetsOfInterest=None,
         outputdir='', info='default', verbose=True):
    """Generate figures of empirical cumulative distribution functions.

    :param DataSetList dsList0: data set of the reference algorithm
    :param DataSetList dsList1: data set of the algorithm of concern
    :param int dim: dimension
    :param TargetValues targetsOfInterest: target function values to be
                                           displayed
    :param string outputdir: output directory (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: control verbosity

    Outputs: image files of the empirical cumulative distribution functions.

    """
    #plt.rc("axes", labelsize=20, titlesize=24)
    #plt.rc("xtick", labelsize=20)
    #plt.rc("ytick", labelsize=20)
    #plt.rc("font", size=20)
    #plt.rc("legend", fontsize=20)

    figureName = os.path.join(outputdir, 'pplogabs_%s' % (info))

    handles = plotLogAbs(dsList0, dsList1, dim, targetsOfInterest,
                         verbose=verbose)
    beautify(handles)

    funcs = set(dsList0.dictByFunc().keys()) & set(dsList1.dictByFunc().keys())
    text = 'f%s' % consecutiveNumbers(sorted(funcs))
    if len(dsList0.dictByDim().keys()) == len(dsList1.dictByDim().keys()) == 1:
        text += ',%d-D' % dsList0.dictByDim().keys()[0]
    plt.text(0.98, 0.02, text, horizontalalignment="right",
             transform=plt.gca().transAxes)

    saveFigure(figureName, verbose=verbose)
    plt.close()
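# The 'f%s' label above is built by ppfig.consecutiveNumbers, which compresses
# sorted function ids into a compact range string. The stand-alone sketch below
# reproduces that idea; it is an illustration, not the library implementation.
def consecutive_numbers_label(ids):
    """Compress sorted integers into a range string, e.g. [1, 2, 3, 5] -> '1-3, 5'."""
    ids = sorted(set(ids))
    runs = []
    start = prev = ids[0]
    for i in ids[1:]:
        if i == prev + 1:
            prev = i
            continue
        runs.append((start, prev))
        start = prev = i
    runs.append((start, prev))
    return ', '.join('%d' % a if a == b else '%d-%d' % (a, b) for a, b in runs)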
def main(dictAlg, outputdir='.', verbose=True):
    """Main routine for generating convergence plots."""
    global warned  # bind variable warned into this scope
    dictFun = pproc.dictAlgByFun(dictAlg)
    for l in dictFun:  # l is the function id
        for i in dictFun[l]:  # i is the algorithm key (name or tuple of names)
            plt.figure()
            if 1 < 3:  # no algorithm name in filename, as everywhere else
                figurename = "ppconv_" + "f%03d" % l
            else:  # previous version with algorithm name, but this is not very practical later
                if type(i) in (list, tuple):
                    figurename = "ppconv_plot_" + i[0] + "_f" + str(l)
                else:
                    try:
                        figurename = "ppconv_plot_" + dictFun[l][i].algId + "_f" + str(l)
                    except AttributeError:  # a (rather desperate) bug-fix attempt that works for the unit test
                        figurename = "ppconv_plot_" + dictFun[l][i][0].algId + "_f" + str(l)
            plt.xlabel('number of function evaluations / dimension')
            plt.ylabel('Median of fitness')
            plt.grid()
            ax = plt.gca()
            ax.set_yscale("log")
            ax.set_xscale("log")
            for j in dictFun[l][i]:  # j is a DataSet
                dimList_b = []
                dimList_f = []
                dimList_b.append(j.funvals[:, 0])
                dimList_f.append(j.funvals[:, 1:])
                bs, fs = rearrange(dimList_b, dimList_f)
                labeltext = str(j.dim) + "D"
                try:
                    if 11 < 3:
                        plt.errorbar(bs[0] / j.dim, fs[0][0],
                                     yerr=[fs[0][1], fs[0][2]], label=labeltext)
                    else:
                        plt.errorbar(bs[0] / j.dim, fs[0][0], label=labeltext)
                except FloatingPointError:  # that's a bit of a hack
                    if 1 < 3 or not warned:
                        print('Warning: floating point error when plotting errorbars, ignored')
                        warned = True
            beautify()
            saveFigure(os.path.join(outputdir, figurename.replace(' ', '')),
                       genericsettings.getFigFormats(), verbose=verbose)
            plt.close()

    try:
        algname = str(dictFun[l].keys()[0][0])
    except KeyError:
        algname = str(dictFun[l].keys()[0])
    save_single_functions_html(os.path.join(outputdir, 'ppconv'), algname)  # first try

    print("Convergence plots done.")
def main(dictAlg, outputdir='.', verbose=True):
    """Main routine for generating convergence plots."""
    global warned  # bind variable warned into this scope
    dictFun = pproc.dictAlgByFun(dictAlg)
    for l in dictFun:  # l is the function id
        for i in dictFun[l]:  # i is the algorithm key (name or tuple of names)
            plt.figure()
            if type(i) in (list, tuple):
                figurename = "ppconv_plot_" + i[0] + "_f" + str(l)
            else:
                try:
                    figurename = "ppconv_plot_" + dictFun[l][i].algId + "_f" + str(l)
                except AttributeError:  # a (rather desperate) bug-fix attempt that works for the unit test
                    figurename = "ppconv_plot_" + dictFun[l][i][0].algId + "_f" + str(l)
            plt.xlabel('number of function evaluations / dimension')
            plt.ylabel('Median of fitness')
            plt.grid()
            ax = plt.gca()
            ax.set_yscale("log")
            ax.set_xscale("log")
            for j in dictFun[l][i]:  # j is a DataSet
                dimList_b = []
                dimList_f = []
                dimList_b.append(j.funvals[:, 0])
                dimList_f.append(j.funvals[:, 1:])
                bs, fs = rearrange(dimList_b, dimList_f)
                labeltext = str(j.dim) + "D"
                try:
                    if 11 < 3:
                        plt.errorbar(bs[0] / j.dim, fs[0][0],
                                     yerr=[fs[0][1], fs[0][2]], label=labeltext)
                    else:
                        plt.errorbar(bs[0] / j.dim, fs[0][0], label=labeltext)
                except FloatingPointError:  # that's a bit of a hack
                    if not warned:
                        print('Warning: floating point error when plotting errorbars, ignored')
                        warned = True
            plt.legend(loc=3)
            saveFigure(os.path.join(outputdir, figurename.replace(' ', '')),
                       genericsettings.fig_formats, verbose=verbose)
            plt.close()

    print("Convergence plots done.")
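# The role of `rearrange` above is to turn the per-trial f-value histories of a
# data set into a median curve plus error-bar offsets for plt.errorbar. The
# sketch below shows one way to compute such statistics; the exact percentiles
# used by `rearrange` are an assumption, and `median_curve` is not the library
# function.
import numpy as np


def median_curve(budgets, fvals_per_trial):
    """Median best f-value per budget and distances to the 10th/90th percentiles.

    `fvals_per_trial` is assumed to hold one row per budget and one column per
    trial, like ``DataSet.funvals[:, 1:]`` above.
    """
    med = np.median(fvals_per_trial, axis=1)
    lower = med - np.percentile(fvals_per_trial, 10, axis=1)
    upper = np.percentile(fvals_per_trial, 90, axis=1) - med
    return np.asarray(budgets), (med, lower, upper)

# Hypothetical usage with a data set `ds`:
#   budgets, (med, lo, hi) = median_curve(ds.funvals[:, 0], ds.funvals[:, 1:])
#   plt.errorbar(budgets / ds.dim, med, yerr=[lo, hi])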
def main(dsList0, dsList1, dim, targetsOfInterest=None,
         outputdir='', info='default', verbose=True):
    """Generate figures of empirical cumulative distribution functions.

    :param DataSetList dsList0: data set of the reference algorithm
    :param DataSetList dsList1: data set of the algorithm of concern
    :param int dim: dimension
    :param TargetValues targetsOfInterest: target function values to be
                                           displayed
    :param string outputdir: output directory (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: control verbosity

    Outputs: image files of the empirical cumulative distribution functions.

    """
    #plt.rc("axes", labelsize=20, titlesize=24)
    #plt.rc("xtick", labelsize=20)
    #plt.rc("ytick", labelsize=20)
    #plt.rc("font", size=20)
    #plt.rc("legend", fontsize=20)

    figureName = os.path.join(outputdir, 'pplogabs_%s' % (info))

    handles = plotLogAbs(dsList0, dsList1, dim, targetsOfInterest,
                         verbose=verbose)
    beautify(handles)

    funcs = set(dsList0.dictByFunc().keys()) & set(dsList1.dictByFunc().keys())
    text = ', '.join(str(i) for i in sorted(funcs))  # str() needed, funcId is an integer
    if len(dsList0.dictByDim().keys()) == len(dsList1.dictByDim().keys()) == 1:
        text += ',%d-D' % dsList0.dictByDim().keys()[0]
    plt.text(0.98, 0.02, text, horizontalalignment="right",
             transform=plt.gca().transAxes)

    saveFigure(figureName, verbose=verbose)
    plt.close()
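# Both pplogabs variants above plot empirical cumulative distribution
# functions. For reference, a bare-bones ECDF step plot looks as follows; the
# toolbox itself goes through plotLogAbs/plotECDF instead, so this is only an
# illustrative sketch.
import numpy as np
import matplotlib.pyplot as plt


def plot_ecdf(values, ax=None, **kwargs):
    """Plot the empirical CDF of `values` as a right-continuous step function."""
    values = np.sort(np.asarray(values, dtype=float))
    fractions = np.arange(1, len(values) + 1) / float(len(values))
    ax = ax or plt.gca()
    return ax.step(values, fractions, where='post', **kwargs)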
def main(dsList, _valuesOfInterest, outputdir, verbose=True):
    """From a DataSetList, generates a convergence and ERT/dim figure vs dim.

    Uses data of BBOB 2009 (:py:mod:`bbob_pproc.bestalg`).

    :param DataSetList dsList: data sets
    :param seq _valuesOfInterest: target precisions, there will be as many
                                  graphs as there are elements in this input
    :param string outputdir: output directory
    :param bool verbose: controls verbosity

    """
    # plt.rc("axes", labelsize=20, titlesize=24)
    # plt.rc("xtick", labelsize=20)
    # plt.rc("ytick", labelsize=20)
    # plt.rc("font", size=20)
    # plt.rc("legend", fontsize=20)

    if not bestalg.bestalgentries2009:
        bestalg.loadBBOB2009()

    dictFunc = dsList.dictByFunc()

    for func in dictFunc:
        plot(dictFunc[func], _valuesOfInterest, styles=styles)  # styles might have changed via config
        beautify(axesLabel=False)
        plt.text(plt.xlim()[0], plt.ylim()[0],
                 _valuesOfInterest.short_info, fontsize=14)
        if func in functions_with_legend:
            plt.legend(loc="best")
        if isBenchmarkinfosFound:
            plt.gca().set_title(funInfos[func])

        plot_previous_algorithms(func, _valuesOfInterest)

        filename = os.path.join(outputdir, 'ppfigdim_f%03d' % (func))
        saveFigure(filename, verbose=verbose)
        plt.close()
def main(dsList, _valuesOfInterest, outputdir, verbose=True):
    """From a DataSetList, generates a convergence and ERT/dim figure vs dim.

    Uses data of BBOB 2009 (:py:mod:`bbob_pproc.bestalg`).

    :param DataSetList dsList: data sets
    :param seq _valuesOfInterest: target precisions, either as list or as
                                  ``pproc.TargetValues`` class instance.
                                  There will be as many graphs as there are
                                  elements in this input.
    :param string outputdir: output directory
    :param bool verbose: controls verbosity

    """
    # plt.rc("axes", labelsize=20, titlesize=24)
    # plt.rc("xtick", labelsize=20)
    # plt.rc("ytick", labelsize=20)
    # plt.rc("font", size=20)
    # plt.rc("legend", fontsize=20)

    _valuesOfInterest = pproc.TargetValues.cast(_valuesOfInterest)

    if not bestalg.bestalgentries2009:
        bestalg.loadBBOB2009()

    dictFunc = dsList.dictByFunc()

    for func in dictFunc:
        plot(dictFunc[func], _valuesOfInterest, styles=styles)  # styles might have changed via config
        beautify(axesLabel=False)
        plt.text(plt.xlim()[0], plt.ylim()[0],
                 _valuesOfInterest.short_info, fontsize=14)
        if func in functions_with_legend:
            plt.legend(loc="best")
        if isBenchmarkinfosFound:
            plt.gca().set_title(funInfos[func])

        plot_previous_algorithms(func, _valuesOfInterest)

        filename = os.path.join(outputdir, 'ppfigdim_f%03d' % (func))
        saveFigure(filename, verbose=verbose)
        plt.close()
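# ppfigdim plots the expected running time (ERT) divided by dimension. ERT is
# the total number of function evaluations spent in all trials (unsuccessful
# trials count with their full budget) divided by the number of successful
# trials. The toolbox computes this via DataSet.detERT; the function below is
# only a self-contained sketch of the same definition.
import numpy as np


def expected_running_time(evals_success, max_evals, success_mask):
    """ERT estimate; returns np.inf if no trial reached the target.

    `evals_success[i]` is the evaluation count of trial i if it succeeded,
    `max_evals[i]` its total budget, `success_mask[i]` whether it succeeded.
    """
    evals_success = np.asarray(evals_success, dtype=float)
    max_evals = np.asarray(max_evals, dtype=float)
    success_mask = np.asarray(success_mask, dtype=bool)
    spent = np.where(success_mask, evals_success, max_evals)
    n_succ = success_mask.sum()
    return spent.sum() / n_succ if n_succ else np.inf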
def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default', dimension=None, verbose=True): """Generates a figure showing the performance of algorithms. From a dictionary of :py:class:`DataSetList` sorted by algorithms, generates the cumulative distribution function of the bootstrap distribution of ERT for algorithms on multiple functions for multiple targets altogether. :param dict dictAlg: dictionary of :py:class:`DataSetList` instances one instance is equivalent to one algorithm, :param list targets: target function values :param list order: sorted list of keys to dictAlg for plotting order :param str outputdir: output directory :param str info: output file name suffix :param bool verbose: controls verbosity """ global x_limit # late assignment of default, because it can be set to None in config global divide_by_dimension # not fully implemented/tested yet if 'x_limit' not in globals() or x_limit is None: x_limit = x_limit_default tmp = pp.dictAlgByDim(dictAlg) # tmp = pp.DictAlg(dictAlg).by_dim() if len(tmp) != 1 and dimension is None: raise ValueError('We never integrate over dimension.') if dimension is not None: if dimension not in tmp.keys(): raise ValueError('dimension %d not in dictAlg dimensions %s' % (dimension, str(tmp.keys()))) tmp = {dimension: tmp[dimension]} dim = tmp.keys()[0] divisor = dim if divide_by_dimension else 1 algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []] dictFunc = pp.dictAlgByFun(dictAlg) # Collect data # Crafting effort correction: should we consider any? CrEperAlg = {} for alg in algorithms_with_data: CrE = 0. if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL': tmp = dictAlg[alg].dictByNoise() assert len(tmp.keys()) == 1 if tmp.keys()[0] == 'noiselessall': CrE = 0.5117 elif tmp.keys()[0] == 'nzall': CrE = 0.6572 CrEperAlg[alg] = CrE if CrE != 0.0: print 'Crafting effort for', alg, 'is', CrE dictData = {} # list of (ert per function) per algorithm dictMaxEvals = {} # list of (maxevals per function) per algorithm bestERT = [] # best ert per function # funcsolved = [set()] * len(targets) # number of functions solved per target xbest2009 = [] maxevalsbest2009 = [] for f, dictAlgperFunc in dictFunc.iteritems(): if function_IDs and f not in function_IDs: continue # print target_values((f, dim)) for j, t in enumerate(target_values((f, dim))): # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)): # funcsolved[j].add(f) for alg in algorithms_with_data: x = [np.inf] * perfprofsamplesize runlengthunsucc = [] try: entry = dictAlgperFunc[alg][0] # one element per fun and per dim. 
evals = entry.detEvals([t])[0] assert entry.dim == dim runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor if len(runlengthsucc) > 0: x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] except (KeyError, IndexError): #set_trace() warntxt = ('Data for algorithm %s on function %d in %d-D ' % (alg, f, dim) + 'are missing.\n') warnings.warn(warntxt) dictData.setdefault(alg, []).extend(x) dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc) displaybest2009 = not isBiobjective #disabled until we find the bug if displaybest2009: #set_trace() bestalgentries = bestalg.loadBestAlgorithm(isBiobjective) bestalgentry = bestalgentries[(dim, f)] bestalgevals = bestalgentry.detEvals(target_values((f, dim))) # print bestalgevals for j in range(len(bestalgevals[0])): if bestalgevals[1][j]: evals = bestalgevals[0][j] #set_trace() assert dim == bestalgentry.dim runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = bestalgentry.maxevals[bestalgevals[1][j]][np.isnan(evals)] / divisor x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] else: x = perfprofsamplesize * [np.inf] runlengthunsucc = [] xbest2009.extend(x) maxevalsbest2009.extend(runlengthunsucc) if order is None: order = dictData.keys() # Display data lines = [] if displaybest2009: args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11., 'markeredgewidth': 1.5, 'markerfacecolor': refcolor, 'markeredgecolor': refcolor, 'color': refcolor, 'label': 'best 2009', 'zorder': -1} lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009, CrE = 0., **args)) def algname_to_label(algname, dirname=None): """to be extended to become generally useful""" if isinstance(algname, (tuple, list)): # not sure this is needed return ' '.join([str(name) for name in algname]) return str(algname) for i, alg in enumerate(order): try: data = dictData[alg] maxevals = dictMaxEvals[alg] except KeyError: continue args = styles[(i) % len(styles)] args['linewidth'] = 1.5 args['markersize'] = 12. args['markeredgewidth'] = 1.5 args['markerfacecolor'] = 'None' args['markeredgecolor'] = args['color'] args['label'] = algname_to_label(alg) #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib #elif len(show_algorithms) > 0: #args['color'] = 'wheat' #args['ls'] = '-' #args['zorder'] = -1 # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... 
which does the work lines.append(plotdata(np.array(data), x_limit, maxevals, CrE=CrEperAlg[alg], **args)) labels, handles = plotLegend(lines, x_limit) if True: # isLateXLeg: fileName = os.path.join(outputdir,'pprldmany_%s.tex' % (info)) with open(fileName, 'w') as f: f.write(r'\providecommand{\nperfprof}{7}') algtocommand = {} # latex commands for i, alg in enumerate(order): tmp = r'\alg%sperfprof' % pptex.numtotext(i) f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' % (tmp, toolsdivers.str_to_latex( toolsdivers.strip_pathname2(algname_to_label(alg))))) algtocommand[algname_to_label(alg)] = tmp if displaybest2009: tmp = r'\algzeroperfprof' f.write(r'\providecommand{%s}{best 2009}' % (tmp)) algtocommand['best 2009'] = tmp commandnames = [] for label in labels: commandnames.append(algtocommand[label]) # f.write(headleg) if len(order) > 28: # latex sidepanel won't work well for more than 25 algorithms, but original labels are also clipped f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}' % (commandnames[0], commandnames[-1])) else: fontsize_command = r'\tiny{}' if len(order) > 19 else '' f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}' % (fontsize_command, commandnames[0])) # TODO: check len(labels) > 0 for i in range(1, len(labels)): f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i]) f.write('}}\n') # f.write(footleg) if verbose: print 'Wrote right-hand legend in %s' % fileName figureName = os.path.join(outputdir,'pprldmany_%s' % (info)) #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat) beautify() text = ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f') text += ',%d-D' % dim # TODO: this is strange when different dimensions are plotted plt.text(0.01, 0.98, text, horizontalalignment="left", verticalalignment="top", transform=plt.gca().transAxes) if len(dictFunc) == 1: plt.title(' '.join((str(dictFunc.keys()[0]), genericsettings.current_testbed.short_names[dictFunc.keys()[0]]))) a = plt.gca() plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative) xticks, labels = plt.xticks() tmp = [] for i in xticks: tmp.append('%d' % round(np.log10(i))) a.set_xticklabels(tmp) if save_figure: ppfig.saveFigure(figureName, verbose=verbose) if len(dictFunc) == 1: ppfig.save_single_functions_html( os.path.join(outputdir, 'pprldmany'), '', # algorithms names are clearly visible in the figure add_to_names='_%02dD' %(dim), algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED ) if close_figure: plt.close()
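# toolsstats.drawSP above bootstraps run lengths of a "simulated restart"
# algorithm: unsuccessful runs are drawn (and their budget paid) until a
# successful run is drawn. The sketch below shows that idea for a single
# bootstrap sample; it is an illustration of the assumed semantics, while the
# library additionally vectorizes this and returns the requested percentiles
# over `samplesize` samples.
import numpy as np


def simulated_restart_runlength(runlengths_succ, runlengths_unsucc, rng=np.random):
    """One bootstrapped run length under the simulated-restart model."""
    n_succ, n_unsucc = len(runlengths_succ), len(runlengths_unsucc)
    if n_succ == 0:
        return np.inf
    total = 0.0
    while True:
        i = rng.randint(n_succ + n_unsucc)
        if i < n_succ:
            return total + runlengths_succ[i]
        total += runlengths_unsucc[i - n_succ]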
def main(dsList, isStoringXMax = False, outputdir = '', info = 'default', verbose = True): """Generate figures of empirical cumulative distribution functions. This method has a feature which allows to keep the same boundaries for the x-axis, if ``isStoringXMax==True``. This makes sense when dealing with different functions or subsets of functions for one given dimension. CAVE: this is bug-prone, as some data depend on the maximum evaluations and the appearence therefore depends on the calling order. :param DataSetList dsList: list of DataSet instances to process. :param bool isStoringXMax: if set to True, the first call :py:func:`beautifyFVD` sets the globals :py:data:`fmax` and :py:data:`maxEvals` and all subsequent calls will use these values as rightmost xlim in the generated figures. :param string outputdir: output directory (must exist) :param string info: string suffix for output file names. :param bool verbose: control verbosity """ # plt.rc("axes", labelsize=20, titlesize=24) # plt.rc("xtick", labelsize=20) # plt.rc("ytick", labelsize=20) # plt.rc("font", size=20) # plt.rc("legend", fontsize=20) targets = single_target_values # convenience abbreviation for d, dictdim in dsList.dictByDim().iteritems(): maxEvalsFactor = max(i.mMaxEvals() / d for i in dictdim) if isStoringXMax: global evalfmax else: evalfmax = None if not evalfmax: evalfmax = maxEvalsFactor if runlen_xlimits_max is not None: evalfmax = runlen_xlimits_max # first figure: Run Length Distribution filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info)) fig = plt.figure() for j in range(len(targets)): plotRLDistr(dictdim, lambda fun_dim: targets(fun_dim)[j], targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j), evalfmax, # can be larger maxEvalsFactor with no effect ** rldStyles[j % len(rldStyles)]) funcs = list(i.funcId for i in dictdim) text = 'f%s' % (consecutiveNumbers(sorted(funcs))) text += ',%d-D' % d if(1): # try: if not isinstance(targets, pproc.RunlengthBasedTargetValues): # if targets.target_values[-1] == 1e-8: # this is a hack plot_previous_algorithms(d, funcs) else: plotRLB_previous_algorithms(d, funcs) # except: # pass plt.axvline(x = maxEvalsFactor, color = 'k') # vertical line at maxevals plt.legend(loc = 'best') plt.text(0.5, 0.98, text, horizontalalignment = "center", verticalalignment = "top", transform = plt.gca().transAxes # bbox=dict(ec='k', fill=False) ) try: # was never tested, so let's make it safe if len(funcs) == 1: plt.title(genericsettings.current_testbed.info(funcs[0])[:27]) except: warnings.warn('could not print title') beautifyRLD(evalfmax) saveFigure(filename, verbose = verbose) plt.close(fig) # second figure: Function Value Distribution filename = os.path.join(outputdir, 'ppfvdistr_%02dD_%s' % (d, info)) fig = plt.figure() plotFVDistr(dictdim, np.inf, 1e-8, **rldStyles[-1]) # coloring right to left for j, max_eval_factor in enumerate(single_runlength_factors): if max_eval_factor > maxEvalsFactor: break plotFVDistr(dictdim, max_eval_factor, 1e-8, **rldUnsuccStyles[j % len(rldUnsuccStyles)]) plt.text(0.98, 0.02, text, horizontalalignment = "right", transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False), beautifyFVD(isStoringXMax = isStoringXMax, ylabel = False) saveFigure(filename, verbose = verbose) plt.close(fig)
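# plotRLDistr above needs, for each target precision, the number of evaluations
# each trial required to reach it (unsuccessful trials yield NaN and are later
# capped by maxevals). The sketch below extracts such run lengths from an array
# laid out as DataSet.evals is assumed to be here: column 0 holds the recorded
# f-values in decreasing order, the remaining columns the per-trial evaluation
# counts. This illustrates what detEvals provides; it is not the library code.
import numpy as np


def runlengths_for_target(evals_array, target):
    """Evaluation counts per trial to first reach `target` (NaN if never reached)."""
    evals_array = np.asarray(evals_array, dtype=float)
    reached = evals_array[:, 0] <= target
    if not reached.any():
        return np.full(evals_array.shape[1] - 1, np.nan)
    return evals_array[reached][0, 1:]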
def comp(dsList0, dsList1, targets, isStoringXMax = False, outputdir = '', info = 'default', verbose = True): """Generate figures of ECDF that compare 2 algorithms. :param DataSetList dsList0: list of DataSet instances for ALG0 :param DataSetList dsList1: list of DataSet instances for ALG1 :param seq targets: target function values to be displayed :param bool isStoringXMax: if set to True, the first call :py:func:`beautifyFVD` sets the globals :py:data:`fmax` and :py:data:`maxEvals` and all subsequent calls will use these values as rightmost xlim in the generated figures. :param string outputdir: output directory (must exist) :param string info: string suffix for output file names. :param bool verbose: control verbosity """ # plt.rc("axes", labelsize=20, titlesize=24) # plt.rc("xtick", labelsize=20) # plt.rc("ytick", labelsize=20) # plt.rc("font", size=20) # plt.rc("legend", fontsize=20) if not isinstance(targets, pproc.RunlengthBasedTargetValues): targets = pproc.TargetValues.cast(targets) dictdim0 = dsList0.dictByDim() dictdim1 = dsList1.dictByDim() for d in set(dictdim0.keys()) & set(dictdim1.keys()): maxEvalsFactor = max(max(i.mMaxEvals() / d for i in dictdim0[d]), max(i.mMaxEvals() / d for i in dictdim1[d])) if isStoringXMax: global evalfmax else: evalfmax = None if not evalfmax: evalfmax = maxEvalsFactor ** 1.05 if runlen_xlimits_max is not None: evalfmax = runlen_xlimits_max filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info)) fig = plt.figure() for j in range(len(targets)): tmp = plotRLDistr(dictdim0[d], lambda fun_dim: targets(fun_dim)[j], targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j), marker = genericsettings.line_styles[1]['marker'], **rldStyles[j % len(rldStyles)]) plt.setp(tmp[-1], label = None) # Remove automatic legend # Mods are added after to prevent them from appearing in the legend plt.setp(tmp, markersize = 20., markeredgewidth = plt.getp(tmp[-1], 'linewidth'), markeredgecolor = plt.getp(tmp[-1], 'color'), markerfacecolor = 'none') tmp = plotRLDistr(dictdim1[d], lambda fun_dim: targets(fun_dim)[j], targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j), marker = genericsettings.line_styles[0]['marker'], **rldStyles[j % len(rldStyles)]) # modify the automatic legend: remover marker and change text plt.setp(tmp[-1], marker = '', label = targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j)) # Mods are added after to prevent them from appearing in the legend plt.setp(tmp, markersize = 15., markeredgewidth = plt.getp(tmp[-1], 'linewidth'), markeredgecolor = plt.getp(tmp[-1], 'color'), markerfacecolor = 'none') funcs = set(i.funcId for i in dictdim0[d]) | set(i.funcId for i in dictdim1[d]) text = 'f%s' % (consecutiveNumbers(sorted(funcs))) if not isinstance(targets, pproc.RunlengthBasedTargetValues): plot_previous_algorithms(d, funcs) else: plotRLB_previous_algorithms(d, funcs) # plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim0[d]), ls='--', color='k') # plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim1[d]), color='k') plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim0[d]), marker = '+', markersize = 20., color = 'k', markeredgewidth = plt.getp(tmp[-1], 'linewidth',)) plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim1[d]), marker = 'o', markersize = 15., color = 'k', markerfacecolor = 'None', markeredgewidth = plt.getp(tmp[-1], 'linewidth')) plt.legend(loc = 'best') plt.text(0.5, 0.98, text, 
horizontalalignment = "center", verticalalignment = "top", transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False), beautifyRLD(evalfmax) saveFigure(filename, verbose = verbose) plt.close(fig)
def main(dictAlg, order=None, outputdir='.', info='default', verbose=True): """Generates a figure showing the performance of algorithms. From a dictionary of :py:class:`DataSetList` sorted by algorithms, generates the cumulative distribution function of the bootstrap distribution of ERT for algorithms on multiple functions for multiple targets altogether. :param dict dictAlg: dictionary of :py:class:`DataSetList` instances one instance is equivalent to one algorithm, :param list targets: target function values :param list order: sorted list of keys to dictAlg for plotting order :param str outputdir: output directory :param str info: output file name suffix :param bool verbose: controls verbosity """ global x_limit # late assignment of default, because it can be set to None in config if 'x_limit' not in globals() or x_limit is None: x_limit = x_limit_default tmp = pp.dictAlgByDim(dictAlg) # tmp = pp.DictAlg(dictAlg).by_dim() if len(tmp) != 1: raise Exception('We never integrate over dimension.') dim = tmp.keys()[0] algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []] dictFunc = pp.dictAlgByFun(dictAlg) # Collect data # Crafting effort correction: should we consider any? CrEperAlg = {} for alg in algorithms_with_data: CrE = 0. if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL': tmp = dictAlg[alg].dictByNoise() assert len(tmp.keys()) == 1 if tmp.keys()[0] == 'noiselessall': CrE = 0.5117 elif tmp.keys()[0] == 'nzall': CrE = 0.6572 CrEperAlg[alg] = CrE if CrE != 0.0: print 'Crafting effort for', alg, 'is', CrE dictData = {} # list of (ert per function) per algorithm dictMaxEvals = {} # list of (maxevals per function) per algorithm bestERT = [] # best ert per function # funcsolved = [set()] * len(targets) # number of functions solved per target xbest2009 = [] maxevalsbest2009 = [] for f, dictAlgperFunc in dictFunc.iteritems(): if function_IDs and f not in function_IDs: continue # print target_values((f, dim)) for j, t in enumerate(target_values((f, dim))): # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)): # funcsolved[j].add(f) for alg in algorithms_with_data: x = [np.inf] * perfprofsamplesize runlengthunsucc = [] try: entry = dictAlgperFunc[alg][0] # one element per fun and per dim. 
evals = entry.detEvals([t])[0] runlengthsucc = evals[np.isnan(evals) == False] / entry.dim runlengthunsucc = entry.maxevals[np.isnan(evals)] / entry.dim if len(runlengthsucc) > 0: x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] except (KeyError, IndexError): #set_trace() warntxt = ('Data for algorithm %s on function %d in %d-D ' % (alg, f, dim) + 'are missing.\n') warnings.warn(warntxt) dictData.setdefault(alg, []).extend(x) dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc) if displaybest2009: #set_trace() if not bestalg.bestalgentries2009: bestalg.loadBBOB2009() bestalgentry = bestalg.bestalgentries2009[(dim, f)] bestalgevals = bestalgentry.detEvals(target_values((f, dim))) # print bestalgevals for j in range(len(bestalgevals[0])): if bestalgevals[1][j]: evals = bestalgevals[0][j] #set_trace() runlengthsucc = evals[np.isnan(evals) == False] / bestalgentry.dim runlengthunsucc = bestalgentry.maxevals[bestalgevals[1][j]][np.isnan(evals)] / bestalgentry.dim x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] else: x = perfprofsamplesize * [np.inf] runlengthunsucc = [] xbest2009.extend(x) maxevalsbest2009.extend(runlengthunsucc) if order is None: order = dictData.keys() # Display data lines = [] if displaybest2009: args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11., 'markeredgewidth': 1.5, 'markerfacecolor': refcolor, 'markeredgecolor': refcolor, 'color': refcolor, 'label': 'best 2009', 'zorder': -1} lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009, CrE = 0., **args)) for i, alg in enumerate(order): try: data = dictData[alg] maxevals = dictMaxEvals[alg] except KeyError: continue args = styles[(i) % len(styles)] args['linewidth'] = 1.5 args['markersize'] = 12. args['markeredgewidth'] = 1.5 args['markerfacecolor'] = 'None' args['markeredgecolor'] = args['color'] args['label'] = alg #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib #elif len(show_algorithms) > 0: #args['color'] = 'wheat' #args['ls'] = '-' #args['zorder'] = -1 lines.append(plotdata(np.array(data), x_limit, maxevals, CrE=CrEperAlg[alg], **args)) labels, handles = plotLegend(lines, x_limit) if True: #isLateXLeg: fileName = os.path.join(outputdir,'pprldmany_%s.tex' % (info)) try: f = open(fileName, 'w') f.write(r'\providecommand{\nperfprof}{7}') algtocommand = {} for i, alg in enumerate(order): tmp = r'\alg%sperfprof' % pptex.numtotext(i) f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' % (tmp, toolsdivers.str_to_latex(toolsdivers.strip_pathname2(alg)))) algtocommand[alg] = tmp commandnames = [] if displaybest2009: tmp = r'\algzeroperfprof' f.write(r'\providecommand{%s}{best 2009}' % (tmp)) algtocommand['best 2009'] = tmp for l in labels: commandnames.append(algtocommand[l]) # f.write(headleg) f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}' % commandnames[0]) # TODO: check len(labels) > 0 for i in range(1, len(labels)): f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i]) f.write('}\n') # f.write(footleg) if verbose: print 'Wrote right-hand legend in %s' % fileName except: raise # TODO: Does this make sense? 
else: f.close() figureName = os.path.join(outputdir,'pprldmany_%s' % (info)) #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat) beautify() text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys()))) text += ',%d-D' % dim plt.text(0.01, 0.98, text, horizontalalignment="left", verticalalignment="top", transform=plt.gca().transAxes) a = plt.gca() plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative) xticks, labels = plt.xticks() tmp = [] for i in xticks: tmp.append('%d' % round(np.log10(i))) a.set_xticklabels(tmp) ppfig.saveFigure(figureName, verbose=verbose) plt.close()
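# The LaTeX legend writers above build command names such as \algaperfprof via
# pptex.numtotext(i), presumably because digits are not allowed in LaTeX
# control sequences. A hypothetical stand-in that spells an index with letters:
def index_to_letters(index):
    """0 -> 'a', 25 -> 'z', 26 -> 'aa', ... (sketch, not pptex.numtotext)."""
    index += 1
    letters = ''
    while index > 0:
        index, remainder = divmod(index - 1, 26)
        letters = chr(ord('a') + remainder) + letters
    return letters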
def generate_plots(f_id, dim, inst_id, f1_id, f2_id, f1_instance, f2_instance, outputfolder="./", inputfolder=None, tofile=True, downsample=False): ############################################################## # # # Objective Space of points on cut (log-scale). # # # ############################################################## fig = plt.figure(1) ax = fig.add_subplot(111) myc = ['g', 'b', 'r', 'y'] # colors for the different line directions myls = [':', '--', '-'] # line styles mylw = dict(lw=2, alpha=0.6) # line width # ALSO: mylw = {'lw':2, 'alpha':0.9} # define lines as a + t*b tlim = 10 # ngrid = 10001 t = np.linspace(-tlim, tlim, num=ngrid, endpoint=True) # Query the optimum from the benchmark to get a working objective function: # ------------------------------------- f1, f1opt = bm.instantiate(f1_id, iinstance=f1_instance) f2, f2opt = bm.instantiate(f2_id, iinstance=f2_instance) fdummy = f1.evaluate(np.zeros((1, dim))) xopt1 = f1.xopt # formerly: `f1.arrxopt[0]` but did not work for all functions f_xopt1 = [f1opt, f2.evaluate(xopt1)] fdummy = f2.evaluate(np.zeros((1, dim))) xopt2 = f2.xopt # formerly: `f2.arrxopt[0]` but did not work for all functions f_xopt2 = [f1.evaluate(xopt2), f2opt] nadir = np.array([f1.evaluate(xopt2), f2.evaluate(xopt1)]) ideal = np.array([f1opt, f2opt]) # evaluate points along random directions through single optima: #rand_dir_1 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim)) rand_dir_1 = np.array([-2.57577836, 3.03082186, -1.33275642, -0.6939155 , 0.99631351, -0.05842807, 1.99304198, 0.38531151, 1.3697517 , 0.37240766, 0.69762214, -0.79613309, -1.45320324, -0.97296174, 0.90871269, -1.00793426, -1.29250002, 0.25110439, 0.26014748, -0.1267351 , 0.63039621, 0.38236451, 1.07914151, 1.07130862, 0.13733215, 1.97801217, 0.48601757, 2.3606844 , 0.30784962, -0.36040267, 0.68263725, -1.55353407, -0.57503424, 0.07362256, 0.95114969, 0.43087735, -1.57600655, 0.48304268, -0.88184912, 1.85066177])[0:dim] rand_dir_1 = rand_dir_1/np.linalg.norm(rand_dir_1) #rand_dir_2 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim)) rand_dir_2 = np.array([0.2493309 , -2.05353785, -1.08038135, -0.06152298, -0.37996052, -0.65976313, -0.11217795, -1.41055602, 0.20321651, -1.42727459, -0.09742259, -0.26135753, -0.20899801, 0.85056449, -0.58492263, -0.93028813, -0.6576416 , -0.02854442, -0.53294699, -0.40898327, -0.64209791, 0.62349299, -0.44248805, 0.60715229, 0.97420653, -0.40989115, 0.67065727, 0.23491168, -0.0607614 , -0.42400703, -1.77536414, 1.92731362, 2.38098092, -0.23789751, -0.02411066, -0.37445709, 0.43547281, 0.32148583, -0.4257802 , 0.15550121])[0:dim] rand_dir_2 = rand_dir_2/np.linalg.norm(rand_dir_2) rand_dir_3 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim)) # rand_dir_3 = np.array([0.27274996, 0.09450028, 0.23123471, -0.17268026, -0.19352246, # 0.11116155, 1.91171592, -0.77188094, 0.50033182, -2.93726319, # -0.0444466 , -0.83483599, -1.05971685, 0.35220208, 0.67446614, # -0.66144976, 0.15873096, 0.63002013, -0.75455445, 0.11553671, # 0.53268058, -0.17107212, -2.68158842, 1.76162118, -1.10528215, # -1.3174873 , -0.56827552, 0.8938743 , -1.40129273, 1.24724136, # 0.32995442, 1.64754152, -0.23038488, -0.1996612 , 0.7423728 , # 0.41590582, -0.49735973, -0.16317831, 0.14116915, 0.33144299])[0:dim] # rand_dir_3 = rand_dir_3/np.linalg.norm(rand_dir_3) rand_dir_4 = np.random.multivariate_normal(np.zeros(dim), np.identity(dim)) # rand_dir_4 = np.array([-1.64810074, 0.06035188, -1.08343971, 0.69871916, -1.57870908, # -0.39555544, 
1.15952858, 0.82573846, -1.00821565, 0.46347426, # 0.46817715, -0.70617468, -0.56754204, -1.77903594, -0.15184591, # 2.10968445, 0.53652335, -0.03221351, -0.34664564, 1.69246492, # 1.26043695, 0.20284844, 1.90425762, -0.43203046, 0.33297092, # -0.43151518, -0.27561938, -0.64456918, -1.52515793, 0.16840333, # -1.44740417, -0.07328904, -0.74026773, 0.02869038, -0.65416703, # 0.55212071, -1.13507935, -1.18781606, 0.42888208, -1.47626463])[0:dim] rand_dir_4 = rand_dir_4/np.linalg.norm(rand_dir_4) # now sample two random points # rand_x_1 = -4+8*np.random.rand(dim) rand_x_1 = np.array([-2.70496645, -0.39106794, -2.80086174, -3.66756864, 2.14644397, 2.78153367, 1.56329668, 2.35839362, 0.13302063, -2.91032329, -2.51556623, -2.35077186, 2.58377453, 1.17508714, -2.4457919 , 1.45033066, -1.23112017, -2.25318184, 2.41933833, -1.14164988, -2.36275527, -3.25853312, -2.4609917 , 3.48296483, -2.68189074, -2.05345914, -2.4116529 , 3.08138791, -2.23247829, 2.54796847, -0.936912 , 3.35564688, 0.51737322, -0.92592536, 1.65481046, -2.52985307, 3.7431933 , -3.6630677 , -0.40448911, 1.33128767])[0:dim] # rand_x_2 = -4+8*np.random.rand(dim) rand_x_2 = np.array([1.57461786, -3.44804825, -3.81020969, 2.83971589, 3.27253056, -3.26623201, 3.79526151, 1.76316424, 1.79345621, -0.81215354, 2.06356913, 1.02657347, 2.99781081, 0.35872047, 3.69835244, -1.68708122, 1.84948801, -0.86589091, -1.61500454, -1.03210602, 3.96363037, -1.30389274, 2.16486049, -2.77809263, -2.78117177, -0.89747482, 3.85189385, 2.34298403, 1.45079637, 3.78130948, 2.55578938, 2.23402556, 0.79451819, 0.30563072, 1.91404655, 0.37739932, -2.07692776, -0.06961333, -2.73583526, -2.70524468])[0:dim] # Construct solutions along rand_dir_1 through xopt1 # ------------------------------------------------------ xgrid_opt_1 = np.tile(xopt1, (ngrid, 1)) xgrid_opt_1 = np.array(xgrid_opt_1 + np.dot(t.reshape(ngrid,1), np.array([rand_dir_1]))) # Construct solutions along coordinate axes through xopt1 # ------------------------------------------------------- xgrid_opt_1_along_axes = [] for k in range(dim): xgrid_along_axis = np.tile(xopt1, (ngrid, 1)) x_dir = np.zeros(dim) x_dir[k] = 1 xgrid_along_axis = xgrid_along_axis + np.dot(t.reshape(ngrid,1), np.array([x_dir])) xgrid_opt_1_along_axes.append(xgrid_along_axis) xgrid_opt_1_along_axes = np.array(xgrid_opt_1_along_axes) # Construct solutions along rand_dir_2 through xopt2 # ------------------------------------------------------ xgrid_opt_2 = np.tile(xopt2, (ngrid, 1)) xgrid_opt_2 = np.array(xgrid_opt_2 + np.dot(t.reshape(ngrid,1), np.array([rand_dir_2]))) # Construct solutions along coordinate axes through xopt1 # ------------------------------------------------------- xgrid_opt_2_along_axes = [] for k in range(dim): xgrid_along_axis = np.tile(xopt2, (ngrid, 1)) x_dir = np.zeros(dim) x_dir[k] = 1 xgrid_along_axis = xgrid_along_axis + np.dot(t.reshape(ngrid,1), np.array([x_dir])) xgrid_opt_2_along_axes.append(xgrid_along_axis) xgrid_opt_2_along_axes = np.array(xgrid_opt_2_along_axes) # Construct solutions along line through xopt1 and xopt2 # ------------------------------------------------------ xgrid_12 = np.tile((xopt1+xopt2)/2, (ngrid, 1)) xgrid_12 = np.array(xgrid_12 + np.dot(t.reshape(ngrid,1), np.array([xopt2-xopt1])/np.linalg.norm([xopt2-xopt1]) ) ) # Construct solutions along a fully random line # ------------------------------------------------------ xgrid_rand_1 = np.tile(rand_x_1, (ngrid, 1)) xgrid_rand_1 = np.array(xgrid_rand_1 + np.dot(t.reshape(ngrid,1), np.array([rand_dir_3]))) # and for another 
fully random line # ------------------------------------------------------ xgrid_rand_2 = np.tile(rand_x_2, (ngrid, 1)) xgrid_rand_2 = np.array(xgrid_rand_2 + np.dot(t.reshape(ngrid,1), np.array([rand_dir_4]))) # Evaluate the grid for each direction # ------------------------------------------- fgrid_opt_1 = [f1.evaluate(xgrid_opt_1), f2.evaluate(xgrid_opt_1)] fgrid_opt_2 = [f1.evaluate(xgrid_opt_2), f2.evaluate(xgrid_opt_2)] fgrid_12 = [f1.evaluate(xgrid_12), f2.evaluate(xgrid_12)] fgrid_rand_1 = [f1.evaluate(xgrid_rand_1), f2.evaluate(xgrid_rand_1)] fgrid_rand_2 = [f1.evaluate(xgrid_rand_2), f2.evaluate(xgrid_rand_2)] fgrid_opt_1_along_axes = [] for k in range(dim): fgrid_opt_1_along_axes.append([f1.evaluate(xgrid_opt_1_along_axes[k]), f2.evaluate(xgrid_opt_1_along_axes[k])]) fgrid_opt_2_along_axes = [] for k in range(dim): fgrid_opt_2_along_axes.append([f1.evaluate(xgrid_opt_2_along_axes[k]), f2.evaluate(xgrid_opt_2_along_axes[k])]) # plot reference sets if available: if inputfolder: filename = "bbob-biobj_f%02d_i%02d_d%02d_nondominated.adat" % (f_id, inst_id, dim) try: A = np.array(np.loadtxt(inputfolder + filename, comments='%', usecols = (1,2))) except: print("Problem opening %s" % (inputfolder + filename)) e = sys.exc_info()[0] print(" Error: %s" % e) if downsample: # normalize A wrt ideal and nadir (and take care of having no inf # in data by adding the constant 1e-15 before the log10): B = (A-ideal) / (nadir-ideal) Blog = np.log10((A-ideal) / (nadir-ideal) + 1e-15) # cut precision to downsample: decimals=3 B = np.around(B, decimals=decimals) Blog = np.around(Blog, decimals=decimals) if 11<3: # filter out dominated points (and doubles) pfFlag = pf.callParetoFront(B) pfFlaglog = pf.callParetoFront(Blog) else: # filter out all but one point per grid cell pfFlag = np.array([False] * len(B), dtype=bool) # check corner case first: if not (B[2][0] == B[0][0] and B[2][1] == B[0][1]): pfFlag[2] = True else: B[2] = B[0] for i in range(3,len(B)): if not (B[i][0] == B[i-1][0] and B[i][1] == B[i-1][1]): pfFlag[i] = True pfFlaglog = np.array([False] * len(Blog), dtype=bool) # check corner case first: if not (Blog[2][0] == Blog[0][0] and Blog[2][1] == Blog[0][1]): pfFlaglog[2] = True else: Blog[2] = Blog[0] for i in range(3,len(Blog)): if not (Blog[i][0] == Blog[i-1][0] and Blog[i][1] == Blog[i-1][1]): pfFlaglog[i] = True # ensure that both extremes are still in, assuming they are stored in the beginning: pfFlag[0] = True pfFlaglog[0] = True pfFlag[1] = True pfFlaglog[1] = True Alog = A[pfFlaglog] A = A[pfFlag] # finally sort wrt f_1 axis: Alog = Alog[Alog[:,0].argsort(kind='mergesort')] A = A[A[:,0].argsort(kind='mergesort')] # normalized plot, such that ideal and nadir are mapped to # 0 and 1 respectively; add 1e-15 for numerical reasons (to not have # inf in the data to plot) plt.loglog((Alog[:,0] - ideal[0])/(nadir[0]-ideal[0]) + 1e-15, (Alog[:,1] - ideal[1])/(nadir[1]-ideal[1]) + 1e-15, '.k', markersize=8) # plot actual solutions along directions: numticks = 5 nf = nadir-ideal # normalization factor used very often now for k in range(dim): p6, = ax.loglog(((fgrid_opt_1_along_axes[k])[0]-f1opt)/nf[0], ((fgrid_opt_1_along_axes[k])[1]-f2opt)/nf[1], color=myc[1], ls=myls[0], lw=1, alpha=0.3) for k in range(dim): p7, = ax.loglog(((fgrid_opt_2_along_axes[k])[0]-f1opt)/nf[0], ((fgrid_opt_2_along_axes[k])[1]-f2opt)/nf[1], color=myc[1], ls=myls[0], lw=1, alpha=0.3) p1, = ax.loglog((fgrid_opt_1[0]-f1opt)/nf[0], (fgrid_opt_1[1]-f2opt)/nf[1], color=myc[1], ls=myls[2], label=r'cuts through single 
optima', **mylw) p2, = ax.loglog((fgrid_opt_2[0]-f1opt)/nf[0], (fgrid_opt_2[1]-f2opt)/nf[1], color=myc[1], ls=myls[2], **mylw) p3, = ax.loglog((fgrid_12[0]-f1opt)/nf[0], (fgrid_12[1]-f2opt)/nf[1], color=myc[2], ls=myls[2], label=r'cut through both optima', **mylw) p4, = ax.loglog((fgrid_rand_1[0]-f1opt)/nf[0], (fgrid_rand_1[1]-f2opt)/nf[1], color=myc[3], ls=myls[2], label=r'two random directions', **mylw) p5, = ax.loglog((fgrid_rand_2[0]-f1opt)/nf[0], (fgrid_rand_2[1]-f2opt)/nf[1], color=myc[3], ls=myls[2], **mylw) # print 'ticks' along the axes in equidistant t space: numticks = 11 plot_ticks([fgrid_opt_1[0], fgrid_opt_1[1]], numticks, nadir, ideal, ax, mylw, myc[1], logscale=True) plot_ticks([fgrid_opt_2[0], fgrid_opt_2[1]], numticks, nadir, ideal, ax, mylw, myc[1], logscale=True) plot_ticks([fgrid_12[0], fgrid_12[1]], numticks, nadir, ideal, ax, mylw, myc[2], logscale=True) plot_ticks([fgrid_rand_1[0], fgrid_rand_1[1]], numticks, nadir, ideal, ax, mylw, myc[3], logscale=True) plot_ticks([fgrid_rand_2[0], fgrid_rand_2[1]], numticks, nadir, ideal, ax, mylw, myc[3], logscale=True) # Get Pareto front from vectors of objective values obtained objs = np.vstack((fgrid_opt_1[0], fgrid_opt_1[1])).transpose() pfFlag_opt_1 = pf.callParetoFront(objs) ax.loglog((fgrid_opt_1[0][pfFlag_opt_1]-f1opt)/nf[0], (fgrid_opt_1[1][pfFlag_opt_1]-f2opt)/nf[1], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) objs = np.vstack((fgrid_opt_2[0], fgrid_opt_2[1])).transpose() pfFlag_opt_2 = pf.callParetoFront(objs) ax.loglog((fgrid_opt_2[0][pfFlag_opt_2]-f1opt)/nf[0], (fgrid_opt_2[1][pfFlag_opt_2]-f2opt)/nf[1], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) objs = np.vstack((fgrid_12[0], fgrid_12[1])).transpose() pfFlag_12 = pf.callParetoFront(objs) ax.loglog((fgrid_12[0][pfFlag_12]-f1opt)/nf[0], (fgrid_12[1][pfFlag_12]-f2opt)/nf[1], color=myc[2], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) objs = np.vstack((fgrid_rand_1[0], fgrid_rand_1[1])).transpose() pfFlag_rand_1 = pf.callParetoFront(objs) ax.loglog((fgrid_rand_1[0][pfFlag_rand_1]-f1opt)/nf[0], (fgrid_rand_1[1][pfFlag_rand_1]-f2opt)/nf[1], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) objs = np.vstack((fgrid_rand_2[0], fgrid_rand_2[1])).transpose() pfFlag_rand_2 = pf.callParetoFront(objs) ax.loglog((fgrid_rand_2[0][pfFlag_rand_2]-f1opt)/nf[0], (fgrid_rand_2[1][pfFlag_rand_2]-f2opt)/nf[1], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) # plot nadir: ax.loglog((nadir[0]-f1opt)/nf[0], (nadir[1]-f2opt)/nf[1], color='k', ls='', marker='+', markersize=9, markeredgewidth=1.5, alpha=0.9) # beautify: ax.set_xlabel(r'$f_1 - f_1^\mathsf{opt}$ (normalized)', fontsize=16) ax.set_ylabel(r'$f_2 - f_2^\mathsf{opt}$ (normalized)', fontsize=16) ax.legend(loc="best", framealpha=0.2) ax.set_title("bbob-biobj $f_{%d}$ along linear search space directions (%d-D, instance %d)" % (f_id, dim, inst_id)) [line.set_zorder(3) for line in ax.lines] [line.set_zorder(3) for line in ax.lines] fig.subplots_adjust(left=0.1) # more room for the y-axis label # we might want to zoom in a bit: ax.set_xlim((1e-3, plt.xlim()[1])) ax.set_ylim((1e-3, plt.ylim()[1])) # ax.set_ylim((0, 2*(nadir[1] - f2opt))) # add rectangle as ROI ax.add_patch(patches.Rectangle( ((ideal[0]-f1opt)/nf[0] + 1e-16, (ideal[1]-f2opt)/nf[1] + 1e-16), (nadir[0]-ideal[0])/nf[0], (nadir[1]-ideal[1])/nf[1], alpha=0.05, color='k')) if tofile: if not os.path.exists(outputfolder): 
os.makedirs(outputfolder) filename = outputfolder + "directions-f%02d-i%02d-d%02d-logobjspace" % (f_id, inst_id, dim) saveFigure(filename, verbose=True) else: plt.show(block=True) plt.close() ############################################################## # # # Plot the same, but not in log-scale. # # # ############################################################## fig = plt.figure(2) ax = fig.add_subplot(111) # plot reference sets if available: if inputfolder: plt.plot(A[:,0], A[:,1], '.k', markersize=8) for k in range(dim): p6, = ax.plot((fgrid_opt_1_along_axes[k])[0], (fgrid_opt_1_along_axes[k])[1], color=myc[1], ls=myls[0], lw=1, alpha=0.3) for k in range(dim): p7, = ax.plot((fgrid_opt_2_along_axes[k])[0], (fgrid_opt_2_along_axes[k])[1], color=myc[1], ls=myls[0], lw=1, alpha=0.3) p1, = ax.plot(fgrid_opt_1[0], fgrid_opt_1[1], color=myc[1], ls=myls[2], label=r'cuts through single optima', **mylw) p2, = ax.plot(fgrid_opt_2[0], fgrid_opt_2[1], color=myc[1], ls=myls[2], **mylw) p3, = ax.plot(fgrid_12[0], fgrid_12[1], color=myc[2], ls=myls[2], label=r'cut through both optima', **mylw) p4, = ax.plot(fgrid_rand_1[0], fgrid_rand_1[1], color=myc[3], ls=myls[2], label=r'two random directions', **mylw) p4, = ax.plot(fgrid_rand_2[0], fgrid_rand_2[1], color=myc[3], ls=myls[2], **mylw) # plot a few ticks along directions, equi-distant in search space: numticks = 11 plot_ticks(fgrid_opt_1, numticks, nadir, ideal, ax, mylw, 'b') plot_ticks(fgrid_opt_2, numticks, nadir, ideal, ax, mylw, 'b') plot_ticks(fgrid_12, numticks, nadir, ideal, ax, mylw, 'r') plot_ticks(fgrid_rand_1, numticks, nadir, ideal, ax, mylw, 'y') plot_ticks(fgrid_rand_2, numticks, nadir, ideal, ax, mylw, 'y') # plot non-dominated points ax.plot(fgrid_opt_1[0][pfFlag_opt_1], fgrid_opt_1[1][pfFlag_opt_1], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(fgrid_opt_2[0][pfFlag_opt_2], fgrid_opt_2[1][pfFlag_opt_2], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(fgrid_12[0][pfFlag_12], fgrid_12[1][pfFlag_12], color=myc[2], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(fgrid_rand_1[0][pfFlag_rand_1], fgrid_rand_1[1][pfFlag_rand_1], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(fgrid_rand_2[0][pfFlag_rand_2], fgrid_rand_2[1][pfFlag_rand_2], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) # plot nadir: ax.plot(nadir[0], nadir[1], color='k', ls='', marker='+', markersize=9, markeredgewidth=1.5, alpha=0.9) # plot ideal: ax.plot(ideal[0], ideal[1], color='k', ls='', marker='x', markersize=8, markeredgewidth=1.5, alpha=0.9) # plot extremes ax.plot(f_xopt1[0], f_xopt1[1], color='blue', ls='', marker='*', markersize=8, markeredgewidth=0.5, markeredgecolor='black') ax.plot(f_xopt2[0], f_xopt2[1], color='blue', ls='', marker='*', markersize=8, markeredgewidth=0.5, markeredgecolor='black') # beautify: ax.set_xlabel(r'first objective', fontsize=16) ax.set_ylabel(r'second objective', fontsize=16) ax.legend(loc="best", framealpha=0.2) ax.set_title("bbob-biobj $f_{%d}$ along linear search space directions (%d-D, instance %d)" % (f_id, dim, inst_id)) [line.set_zorder(3) for line in ax.lines] [line.set_zorder(3) for line in ax.lines] fig.subplots_adjust(left=0.1) # more room for the y-axis label # zoom into Pareto front: ax.set_xlim((ideal[0]-0.05*(nadir[0] - ideal[0]), nadir[0] + (nadir[0] - ideal[0]))) ax.set_ylim([ideal[1]-0.05*(nadir[1] - ideal[1]), nadir[1] + (nadir[1] - ideal[1])]) # add 
rectangle as ROI ax.add_patch(patches.Rectangle( (ideal[0], ideal[1]), nadir[0]-ideal[0], nadir[1]-ideal[1], alpha=0.05, color='k')) if tofile: if not os.path.exists(outputfolder): os.makedirs(outputfolder) filename = outputfolder + "directions-f%02d-i%02d-d%02d-objspace" % (f_id, inst_id, dim) saveFigure(filename, verbose=True) else: plt.show(block=True) plt.close() ############################################################## # # # Finally, the corresponding plots in search space, i.e. # # projections of it onto the variables x_1 and x_(dim-1) # # (or x1, x2 in the case of not enough variables). # # # ############################################################## fig = plt.figure(3) ax = fig.add_subplot(111) # plot reference sets if available: #if inputfolder: # plt.plot(A[:,0], A[:,1], '.k', markersize=8) ax.set_xlabel(r'$x_1$', fontsize=16) # fix second variable in addition to x_1: if dim > 2: second_variable = -2 ax.set_ylabel(r'$x_{%d}$' % (dim-1), fontsize=16) else: second_variable = 1 ax.set_ylabel(r'$x_{%d}$' % dim, fontsize=16) # read and plot best Pareto set approximation if inputfolder: filename = "bbob-biobj_f%02d_i%02d_d%02d_nondominated.adat" % (f_id, inst_id, dim) C = [] with open(inputfolder + filename) as f: for line in f: splitline = line.split() if len(splitline) == (dim + 3): # has line x-values? C.append(np.array(splitline[3:], dtype=np.float)) C = np.array(C) C = C[C[:, second_variable].argsort(kind='mergesort')] # sort wrt x_{second_variable} first C = C[C[:, 0].argsort(kind='mergesort')] # now wrt x_1 to finally get a stable sort pareto_set_approx_size = C.shape[0] # filter out all but one point per grid cell in the # (x_1, x_{second_variable}) space if downsample: decimals=2 X = np.around(C, decimals=decimals) # sort wrt x_{second_variable} first idx_1 = X[:, second_variable].argsort(kind='mergesort') X = X[idx_1] # now wrt x_1 to finally get a stable sort idx_2 = X[:, 0].argsort(kind='mergesort') X = X[idx_2] xflag = np.array([False] * len(X), dtype=bool) xflag[0] = True # always take the first point for i in range(1, len(X)): if not (X[i,0] == X[i-1,0] and X[i,second_variable] == X[i-1, second_variable]): xflag[i] = True X = ((C[idx_1])[idx_2])[xflag] pareto_set_sample_size = X.shape[0] paretosetlabel = ('reference set (%d of %d points)' % (pareto_set_sample_size, pareto_set_approx_size)) plt.plot(X[:, 0], X[:, second_variable], '.k', markersize=8, label=paretosetlabel) # end of reading in and plotting best Pareto set approximation for k in range(dim): p6, = ax.plot(xgrid_opt_1_along_axes[k][:, 0], xgrid_opt_1_along_axes[k][:, second_variable], color=myc[1], ls=myls[0], lw=1, alpha=0.3) for k in range(dim): p7, = ax.plot(xgrid_opt_2_along_axes[k][:, 0], xgrid_opt_2_along_axes[k][:, second_variable], color=myc[1], ls=myls[0], lw=1, alpha=0.3) p1, = ax.plot(xgrid_opt_1[:, 0], xgrid_opt_1[:, second_variable], color=myc[1], ls=myls[2], label=r'cuts through single optima', **mylw) p2, = ax.plot(xgrid_opt_2[:, 0], xgrid_opt_2[:, second_variable], color=myc[1], ls=myls[2], **mylw) p3, = ax.plot(xgrid_12[:, 0], xgrid_12[:, second_variable], color=myc[2], ls=myls[2], label=r'cut through both optima', **mylw) p4, = ax.plot(xgrid_rand_1[:, 0], xgrid_rand_1[:, second_variable], color=myc[3], ls=myls[2], label=r'two random directions', **mylw) p5, = ax.plot(xgrid_rand_2[:, 0], xgrid_rand_2[:, second_variable], color=myc[3], ls=myls[2], **mylw) # plot non-dominated points ax.plot(xgrid_opt_1[pfFlag_opt_1, 0], xgrid_opt_1[pfFlag_opt_1, second_variable], color=myc[1], ls='', 
marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(xgrid_opt_2[pfFlag_opt_2, 0], xgrid_opt_2[pfFlag_opt_2, second_variable], color=myc[1], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(xgrid_12[pfFlag_12, 0], xgrid_12[pfFlag_12, second_variable], color=myc[2], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(xgrid_rand_1[pfFlag_rand_1, 0], xgrid_rand_1[pfFlag_rand_1, second_variable], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) ax.plot(xgrid_rand_2[pfFlag_rand_2, 0], xgrid_rand_2[pfFlag_rand_2, second_variable], color=myc[3], ls='', marker='.', markersize=8, markeredgewidth=0, alpha=0.4) # highlight the region [-5,5] ax.add_patch(patches.Rectangle( (-5, -5), 10, 10, alpha=0.05, color='k')) # beautify ax.set_xlim([-6, 6]) ax.set_ylim([-6, 6]) if dim == 2: ax.set_title("decision space of bbob-biobj $f_{%d}$ (%d-D, instance %d)" % (f_id, dim, inst_id)) else: ax.set_title("projection of decision space for bbob-biobj $f_{%d}$ (%d-D, instance %d)" % (f_id, dim, inst_id)) ax.legend(loc="best", framealpha=0.2, numpoints=1) fig.subplots_adjust(left=0.1) # more room for the y-axis label # printing if tofile: if not os.path.exists(outputfolder): os.makedirs(outputfolder) filename = outputfolder + "directions-f%02d-i%02d-d%02d-searchspace" % (f_id, inst_id, dim) saveFigure(filename, verbose=True) else: plt.show(block=True) plt.close()
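# The reference-set handling above thins out the nondominated points by keeping
# only one point per grid cell after rounding to a fixed number of decimals. A
# simplified, self-contained version of that idea (the original additionally
# keeps the two extreme points and works on ideal/nadir-normalized data):
import numpy as np


def downsample_per_grid_cell(points, decimals=3):
    """Keep the first point falling into each rounded grid cell."""
    rounded = np.around(np.asarray(points, dtype=float), decimals=decimals)
    seen = set()
    keep = []
    for i, row in enumerate(rounded):
        key = tuple(row)
        if key not in seen:
            seen.add(key)
            keep.append(i)
    return np.asarray(points)[keep]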
def main(dsList0, dsList1, outputdir, verbose=True): """Generate a scatter plot figure. TODO: """ #plt.rc("axes", labelsize=24, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) dictFunc0 = dsList0.dictByFunc() dictFunc1 = dsList1.dictByFunc() funcs = set(dictFunc0.keys()) & set(dictFunc1.keys()) for f in funcs: dictDim0 = dictFunc0[f].dictByDim() dictDim1 = dictFunc1[f].dictByDim() dims = set(dictDim0.keys()) & set(dictDim1.keys()) #set_trace() for i, d in enumerate(dimensions): try: entry0 = dictDim0[d][0] # should be only one element entry1 = dictDim1[d][0] # should be only one element except (IndexError, KeyError): continue if linewidth: # plot all reliable ERT values as a line all_targets = np.array(sorted(set(entry0.target).union(entry1.target), reverse=True)) assert entry0.detSuccessRates([all_targets[0]]) == 1.0 assert entry1.detSuccessRates([all_targets[0]]) == 1.0 all_targets = all_targets[np.where(all_targets <= targets((f, d))[0])[0]] # xdata_all = np.array(entry0.detERT(all_targets)) ydata_all = np.array(entry1.detERT(all_targets)) # idx of reliable targets: last index where success rate >= 1/2 and ERT <= maxevals idx = [] for ari in (np.where(entry0.detSuccessRates(all_targets) >= 0.5)[0], np.where(entry1.detSuccessRates(all_targets) >= 0.5)[0], np.where(xdata_all <= max(entry0.maxevals))[0], np.where(ydata_all <= max(entry1.maxevals))[0] ): if len(ari): idx.append(ari[-1]) if len(idx) == 4: max_idx = min(idx) ## at least up to the most difficult given target ## idx = max((idx, np.where(all_targets >= targets((f, d))[-1])[0][-1])) xdata_all = xdata_all[:max_idx + 1] ydata_all = ydata_all[:max_idx + 1] idx = (numpy.isfinite(xdata_all)) * (numpy.isfinite(ydata_all)) assert idx.all() if idx.any(): plt.plot(xdata_all[idx], ydata_all[idx], colors[i], ls='solid', lw=linewidth, # TODO: ls has changed, check whether this works out clip_on=False) xdata = numpy.array(entry0.detERT(targets((f, d)))) ydata = numpy.array(entry1.detERT(targets((f, d)))) tmp = (numpy.isinf(xdata)==False) * (numpy.isinf(ydata)==False) if tmp.any(): try: plt.plot(xdata[tmp], ydata[tmp], ls='', markersize=markersize, marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, clip_on=False) except KeyError: plt.plot(xdata[tmp], ydata[tmp], ls='', markersize=markersize, marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, clip_on=False) #try: # plt.scatter(xdata[tmp], ydata[tmp], s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3) #except ValueError: # set_trace() #ax = plt.gca() ax = plt.axes() tmp = numpy.isinf(xdata) * (numpy.isinf(ydata)==False) if tmp.any(): trans = blend(ax.transAxes, ax.transData) #plt.scatter([1.]*numpy.sum(tmp), ydata[tmp], s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot([1.]*numpy.sum(tmp), ydata[tmp], markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) except KeyError: plt.plot([1.]*numpy.sum(tmp), ydata[tmp], markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) #set_trace() tmp = (numpy.isinf(xdata)==False) * numpy.isinf(ydata) if tmp.any(): trans = blend(ax.transData, ax.transAxes) # plt.scatter(xdata[tmp], [1.-offset]*numpy.sum(tmp), s=10, marker=markers[i], # 
facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot(xdata[tmp], [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) except KeyError: plt.plot(xdata[tmp], [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) tmp = numpy.isinf(xdata) * numpy.isinf(ydata) if tmp.any(): # plt.scatter(xdata[tmp], [1.-offset]*numpy.sum(tmp), s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot([1.-offset]*numpy.sum(tmp), [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=ax.transAxes, clip_on=False) except KeyError: plt.plot([1.-offset]*numpy.sum(tmp), [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=ax.transAxes, clip_on=False) #set_trace() beautify() for i, d in enumerate(dimensions): try: entry0 = dictDim0[d][0] # should be only one element entry1 = dictDim1[d][0] # should be only one element except (IndexError, KeyError): continue minbnd, maxbnd = plt.xlim() plt.plot((entry0.mMaxEvals(), entry0.mMaxEvals()), # (minbnd, entry1.mMaxEvals()), ls='-', color=colors[i], (max([minbnd, entry1.mMaxEvals()/max_evals_line_length]), entry1.mMaxEvals()), ls='-', color=colors[i], zorder=-1) plt.plot(# (minbnd, entry0.mMaxEvals()), (max([minbnd, entry0.mMaxEvals()/max_evals_line_length]), entry0.mMaxEvals()), (entry1.mMaxEvals(), entry1.mMaxEvals()), ls='-', color=colors[i], zorder=-1) plt.xlim(minbnd, maxbnd) plt.ylim(minbnd, maxbnd) #Set the boundaries again: they changed due to new plots. #plt.axvline(entry0.mMaxEvals(), ls='--', color=colors[i]) #plt.axhline(entry1.mMaxEvals(), ls='--', color=colors[i]) try: plt.ylabel(funInfos[f]) except IndexError: pass filename = os.path.join(outputdir, 'ppscatter_f%03d' % f) saveFigure(filename, verbose=verbose) plt.close()
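# Both scatter routines above pin markers whose x- or y-ERT is infinite to the
# axes border by blending axes and data coordinates. A minimal, self-contained
# sketch of that blended-transform idiom (the ERT values are made up):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import blended_transform_factory as blend

xdata = np.array([10., 100., np.inf])   # assumed ERT of algorithm 0
ydata = np.array([20., np.inf, 50.])    # assumed ERT of algorithm 1

ax = plt.gca()
both = np.isfinite(xdata) & np.isfinite(ydata)
ax.loglog(xdata[both], ydata[both], ls='', marker='o')

# x infinite: x in axes coordinates (1.0 = right border), y in data coordinates
x_inf = ~np.isfinite(xdata) & np.isfinite(ydata)
ax.plot([1.0] * x_inf.sum(), ydata[x_inf], ls='', marker='>',
        transform=blend(ax.transAxes, ax.transData), clip_on=False)

# y infinite: x in data coordinates, y in axes coordinates (1.0 = top border)
y_inf = np.isfinite(xdata) & ~np.isfinite(ydata)
ax.plot(xdata[y_inf], [1.0] * y_inf.sum(), ls='', marker='^',
        transform=blend(ax.transData, ax.transAxes), clip_on=False)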
def main(dsList0, dsList1, outputdir, verbose=True): """Generate a scatter plot figure.""" #plt.rc("axes", labelsize=24, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) dictFunc0 = dsList0.dictByFunc() dictFunc1 = dsList1.dictByFunc() funcs = set(dictFunc0.keys()) & set(dictFunc1.keys()) for f in funcs: dictDim0 = dictFunc0[f].dictByDim() dictDim1 = dictFunc1[f].dictByDim() dims = set(dictDim0.keys()) & set(dictDim1.keys()) #set_trace() for i, d in enumerate(dimensions): try: entry0 = dictDim0[d][0] # should be only one element entry1 = dictDim1[d][0] # should be only one element except (IndexError, KeyError): continue xdata = numpy.array(entry0.detERT(targets)) ydata = numpy.array(entry1.detERT(targets)) tmp = (numpy.isinf(xdata)==False) * (numpy.isinf(ydata)==False) if tmp.any(): try: plt.plot(xdata[tmp], ydata[tmp], ls='', markersize=markersize, marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3) except KeyError: plt.plot(xdata[tmp], ydata[tmp], ls='', markersize=markersize, marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3) #try: # plt.scatter(xdata[tmp], ydata[tmp], s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3) #except ValueError: # set_trace() #ax = plt.gca() ax = plt.axes() tmp = numpy.isinf(xdata) * (numpy.isinf(ydata)==False) if tmp.any(): trans = blend(ax.transAxes, ax.transData) #plt.scatter([1.]*numpy.sum(tmp), ydata[tmp], s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot([1.]*numpy.sum(tmp), ydata[tmp], markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) except KeyError: plt.plot([1.]*numpy.sum(tmp), ydata[tmp], markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) #set_trace() tmp = (numpy.isinf(xdata)==False) * numpy.isinf(ydata) if tmp.any(): trans = blend(ax.transData, ax.transAxes) # plt.scatter(xdata[tmp], [1.-offset]*numpy.sum(tmp), s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot(xdata[tmp], [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) except KeyError: plt.plot(xdata[tmp], [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) tmp = numpy.isinf(xdata) * numpy.isinf(ydata) if tmp.any(): # plt.scatter(xdata[tmp], [1.-offset]*numpy.sum(tmp), s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot([1.-offset]*numpy.sum(tmp), [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=ax.transAxes, clip_on=False) except KeyError: plt.plot([1.-offset]*numpy.sum(tmp), [1.-offset]*numpy.sum(tmp), markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=ax.transAxes, clip_on=False) #set_trace() beautify() for i, d in enumerate(dimensions): try: entry0 = dictDim0[d][0] # should be only one element entry1 = dictDim1[d][0] # should 
be only one element except (IndexError, KeyError): continue minbnd, maxbnd = plt.xlim() plt.plot((entry0.mMaxEvals(), entry0.mMaxEvals()), # (minbnd, entry1.mMaxEvals()), ls='-', color=colors[i], (max([minbnd, entry1.mMaxEvals()/10.]), entry1.mMaxEvals()), ls='-', color=colors[i], zorder=-1) plt.plot(# (minbnd, entry0.mMaxEvals()), (max([minbnd, entry0.mMaxEvals()/10.]), entry0.mMaxEvals()), (entry1.mMaxEvals(), entry1.mMaxEvals()), ls='-', color=colors[i], zorder=-1) plt.xlim(minbnd, maxbnd) plt.ylim(minbnd, maxbnd) #Set the boundaries again: they changed due to new plots. #plt.axvline(entry0.mMaxEvals(), ls='--', color=colors[i]) #plt.axhline(entry1.mMaxEvals(), ls='--', color=colors[i]) if isBenchmarkinfosFound: try: plt.ylabel(funInfos[f]) except IndexError: pass filename = os.path.join(outputdir, 'ppscatter_f%03d' % f) saveFigure(filename, verbose=verbose) plt.close()
def main(dsList, _targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8), param=('dim', 'Dimension'), is_normalized=True, outputdir='.', verbose=True): """Generates figure of ERT vs. param. This script will generate as many figures as there are functions. For a given function and a given parameter value there should be only **one** data set. Crosses (+) give the median number of function evaluations of successful trials for the smallest reached target function value. Crosses (x) give the average number of overall conducted function evaluations in case the smallest target function value (1e-8) was not reached. :keyword DataSetList dsList: data sets :keyword seq _targets: target precisions :keyword tuple param: parameter on x-axis. The first element has to be a string corresponding to the name of an attribute common to elements of dsList. The second element has to be a string which will be used as label for the figures. The values of attribute param have to be sortable. :keyword bool is_normalized: if True the y values are normalized by x values :keyword string outputdir: name of output directory for the image files :keyword bool verbose: controls verbosity """ funInfos = read_fun_infos(dsList.isBiobjective()) # TODO check input parameter param for func, dictfunc in dsList.dictByFunc().iteritems(): filename = os.path.join(outputdir,'ppfigparam_%s_f%03d' % (param[0], func)) try: targets = list(j[func] for j in _targets) except TypeError: targets = _targets targets = sorted(targets) # from hard to easy handles = plot(dictfunc, param[0], targets) # # display best 2009 # if not bestalg.bestalgentries2009: # bestalg.loadBBOB2009() # bestalgdata = [] # for d in dimsBBOB: # entry = bestalg.bestalgentries2009[(d, func)] # tmp = entry.detERT([1e-8])[0] # if not np.isinf(tmp): # bestalgdata.append(tmp/d) # else: # bestalgdata.append(None) # plt.plot(dimsBBOB, bestalgdata, color=refcolor, linewidth=10, zorder=-2) # plt.plot(dimsBBOB, bestalgdata, ls='', marker='d', markersize=25, # color=refcolor, markeredgecolor=refcolor, zorder=-2) a = plt.gca() if is_normalized: for i in handles: try: plt.setp(i, 'ydata', plt.getp(i, 'ydata') / plt.getp(i, 'xdata')) except TypeError: pass a.relim() a.autoscale_view() beautify() plt.xlabel(param[1]) if is_normalized: plt.setp(plt.gca(), 'ylabel', plt.getp(a, 'ylabel') + ' / ' + param[1]) if func in (1, 24, 101, 130): plt.legend(loc="best") if func in funInfos.keys(): a.set_title(funInfos[func]) saveFigure(filename, verbose=verbose) plt.close()
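# The is_normalized branch above divides each plotted line's y-data by its
# x-data in place (e.g. turning ERT into ERT/dimension) and then rescales the
# axes. A minimal sketch of that setp/getp idiom with made-up numbers:
import numpy as np
import matplotlib.pyplot as plt

dims = np.array([2., 3., 5., 10., 20.])
erts = np.array([40., 90., 250., 1000., 4400.])   # assumed ERT per dimension
handles = plt.plot(dims, erts, marker='o')
for h in handles:
    plt.setp(h, 'ydata', plt.getp(h, 'ydata') / plt.getp(h, 'xdata'))
ax = plt.gca()
ax.relim()            # recompute the data limits after modifying the line
ax.autoscale_view()   # and adapt the view to the normalized values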
def main(dsList, _targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8), param=('dim', 'Dimension'), is_normalized=True, outputdir='.', verbose=True): """Generates figure of ERT vs. param. This script will generate as many figures as there are functions. For a given function and a given parameter value there should be only **one** data set. Crosses (+) give the median number of function evaluations of successful trials for the smallest reached target function value. Crosses (x) give the average number of overall conducted function evaluations in case the smallest target function value (1e-8) was not reached. :keyword DataSetList dsList: data sets :keyword seq _targets: target precisions :keyword tuple param: parameter on x-axis. The first element has to be a string corresponding to the name of an attribute common to elements of dsList. The second element has to be a string which will be used as label for the figures. The values of attribute param have to be sortable. :keyword bool is_normalized: if True the y values are normalized by x values :keyword string outputdir: name of output directory for the image files :keyword bool verbose: controls verbosity """ # TODO check input parameter param for func, dictfunc in dsList.dictByFunc().iteritems(): filename = os.path.join(outputdir, 'ppfigparam_%s_f%03d' % (param[0], func)) try: targets = list(j[func] for j in _targets) except TypeError: targets = _targets targets = sorted(targets) # from hard to easy handles = plot(dictfunc, param[0], targets) # # display best 2009 # if not bestalg.bestalgentries2009: # bestalg.loadBBOB2009() # bestalgdata = [] # for d in dimsBBOB: # entry = bestalg.bestalgentries2009[(d, func)] # tmp = entry.detERT([1e-8])[0] # if not np.isinf(tmp): # bestalgdata.append(tmp/d) # else: # bestalgdata.append(None) # plt.plot(dimsBBOB, bestalgdata, color=refcolor, linewidth=10, zorder=-2) # plt.plot(dimsBBOB, bestalgdata, ls='', marker='d', markersize=25, # color=refcolor, markeredgecolor=refcolor, zorder=-2) a = plt.gca() if is_normalized: for i in handles: try: plt.setp(i, 'ydata', plt.getp(i, 'ydata') / plt.getp(i, 'xdata')) except TypeError: pass a.relim() a.autoscale_view() beautify() plt.xlabel(param[1]) if is_normalized: plt.setp(plt.gca(), 'ylabel', plt.getp(a, 'ylabel') + ' / ' + param[1]) if func in (1, 24, 101, 130): plt.legend(loc="best") if isBenchmarkinfosFound: a.set_title(funInfos[func]) saveFigure(filename, verbose=verbose) plt.close()
def generateFigure(dsList, CrE=0., isStoringXRange=True, outputdir='.', info='default', verbose=True): """Generates ERT loss ratio figures. :param DataSetList dsList: input data set :param float CrE: crafting effort (see COCO documentation) :param bool isStoringXRange: if set to True, the first call to this function sets the global :py:data:`evalf` and all subsequent calls will use this value as boundaries in the generated figures. :param string outputdir: output folder (must exist) :param string info: string suffix for output file names :param bool verbose: controls verbosity """ #plt.rc("axes", labelsize=20, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) if isStoringXRange: global evalf else: evalf = None # do not aggregate over dimensions for d, dsdim in dsList.dictByDim().iteritems(): maxevals = max(max(i.ert[numpy.isinf(i.ert)==False]) for i in dsdim) EVALS = [2.*d] EVALS.extend(numpy.power(10., numpy.arange(1, numpy.floor(numpy.log10(maxevals*1./d))))*d) if not evalf: evalf = (numpy.log10(EVALS[0]/d), numpy.log10(EVALS[-1]/d)) data = generateData(dsdim, EVALS, CrE) ydata = [] for i in range(len(EVALS)): #Aggregate over functions. ydata.append(numpy.log10(list(data[f][i] for f in data))) xdata = numpy.log10(numpy.array(EVALS)/d) xticklabels = [''] xticklabels.extend('%d' % i for i in xdata[1:]) plot(xdata, ydata) filename = os.path.join(outputdir, 'pplogloss_%02dD_%s' % (d, info)) plt.xticks(xdata, xticklabels) #Is there an upper bound? if CrE > 0 and len(set(dsdim.dictByFunc().keys())) >= 20: #TODO: hopefully this means we are not considering function groups. plt.text(0.01, 0.98, 'CrE = %5g' % CrE, fontsize=20, horizontalalignment='left', verticalalignment='top', transform = plt.gca().transAxes, bbox=dict(facecolor='w')) plt.axhline(1., color='k', ls='-', zorder=-1) plt.axvline(x=numpy.log10(max(i.mMaxEvals()/d for i in dsdim)), color='k') funcs = set(i.funcId for i in dsdim) if len(funcs) > 1: text = 'f%d-%d' %(min(funcs), max(funcs)) else: text = 'f%d' %(funcs.pop()) plt.text(0.5, 0.93, text, horizontalalignment="center", transform=plt.gca().transAxes) beautify() if evalf: plt.xlim(xmin=evalf[0]-0.5, xmax=evalf[1]+0.5) saveFigure(filename, verbose=verbose) #plt.show() plt.close()
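# The budget grid EVALS above starts at 2*D and then grows in factors of ten
# (times D) up to the largest finite ERT. A standalone sketch of that
# construction with an assumed dimension and maximal ERT:
import numpy as np

dim = 10
max_ert = 3.2e5   # assumed largest finite ERT in the data
evals = [2. * dim]
evals.extend(np.power(10., np.arange(1, np.floor(np.log10(max_ert / dim)))) * dim)
# -> [20.0, 100.0, 1000.0, 10000.0]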
def main(dsList0, dsList1, minfvalue=1e-8, outputdir='', verbose=True): """Returns ERT1/ERT0 comparison figure.""" plt.rc("axes", labelsize=20, titlesize=24) plt.rc("xtick", labelsize=20) plt.rc("ytick", labelsize=20) plt.rc("font", size=20) plt.rc("legend", fontsize=20) dictFun0 = dsList0.dictByFunc() dictFun1 = dsList1.dictByFunc() for func in set.intersection(set(dictFun0), set(dictFun1)): dictDim0 = dictFun0[func].dictByDim() dictDim1 = dictFun1[func].dictByDim() if isBenchmarkinfosFound: title = funInfos[func] else: title = '' filename = os.path.join(outputdir,'ppcmpfig_f%d' % (func)) dims = sorted(set.intersection(set(dictDim0), set(dictDim1))) handles = [] dataperdim = {} fvalueswitch = {} nbtests = 0 for i, dim in enumerate((2, 3, 5, 10, 20, 40)): try: entry0 = dictDim0[dim][0] entry1 = dictDim1[dim][0] except KeyError: continue nbtests += 1 # generateData: data = generateData(entry0, entry1, fthresh=fthresh) dataperdim[dim] = data # TODO: hack, modify slightly so line goes to 'zero' if minfvalue: for d in data: tmp = d[:, 0] tmp[tmp == 0] = min(min(tmp[tmp > 0]), minfvalue)**2 # plot idx = numpy.isfinite(data[0][:, 1]) * numpy.isfinite(data[1][:, 1]) ydata = data[1][idx, 1]/data[0][idx, 1] plt.plot(data[0][idx, 0], ydata, ls='--', color=colors[i], lw=linewidth) # This is one possibility: #idx = (data[0][:, 3] >= 5) * (data[1][:, 3] >= 5) idx = ((data[0][:, 1] <= 3 * numpy.median(entry0.maxevals)) * (data[1][:, 1] <= 3 * numpy.median(entry1.maxevals))) #if func==5: # set_trace() fvalueswitch[dim] = min(data[0][idx, 0]) ydata = data[1][idx, 1]/data[0][idx, 1] plt.plot(data[0][idx, 0], ydata, color=colors[i], lw=linewidth) #h = plotERTRatio(data, plotargs) beautify(xmin=minfvalue) #beautify() ax = plt.gca() # Freeze the boundaries ax.set_autoscale_on(False) #trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) # Plot everything else for i, dim in enumerate((2, 3, 5, 10, 20, 40)): try: entry0 = dictDim0[dim][0] entry1 = dictDim1[dim][0] data = dataperdim[dim] except KeyError: continue # annotation annotate(entry0, entry1, dim, minfvalue, nbtests=nbtests) tmp0 = numpy.isfinite(data[0][:, 1]) tmp1 = numpy.isfinite(data[1][:, 1]) idx = tmp0 * tmp1 #Do not plot anything else if it happens after minfvalue if data[0][idx, 0][-1] <= minfvalue: # hack for the legend plt.plot((data[0][idx, 0][-1]**2, ), (ydata[-1], ), marker='D', color=colors[i], lw=linewidth, label='%2d-D' % dim, markeredgecolor=colors[i], markerfacecolor='None', markeredgewidth=linewidth, markersize=3*linewidth) continue # Determine which algorithm went further algstoppedlast = 0 algstoppedfirst = 1 if numpy.sum(tmp0) < numpy.sum(tmp1): algstoppedlast = 1 algstoppedfirst = 0 #marker if an algorithm stopped ydata = data[1][idx, 1]/data[0][idx, 1] plt.plot((data[0][idx, 0][-1], ), (ydata[-1], ), marker='D', color=colors[i], lw=linewidth, label='%2d-D' % dim, markeredgecolor=colors[i], markerfacecolor='None', markeredgewidth=linewidth, markersize=3*linewidth) tmpy = ydata[-1] # plot probability of success line dataofinterest = data[algstoppedlast] tmp = numpy.nonzero(idx)[0][-1] # Why [0]? # add the last line for which both algorithm still have a success idx = (data[algstoppedfirst][:, 2] == 0.) * (dataofinterest[:, 2] > 0.) 
idx[tmp] = True if len(idx) == 0 or not idx.any(): continue ymin, ymax = plt.ylim() #orientation = -1 ybnd = ymin if algstoppedlast == 0: ybnd = ymax #orientation = 1 #ydata = orientation * dataofinterest[idx, 2] / 2 + 0.5 ydata = numpy.power(10, numpy.log10(ybnd) * (dataofinterest[idx, 2] -offset*(5-i)*numpy.log10(ymax/ymin)/numpy.abs(numpy.log10(ybnd)))) ls = '-' if dataofinterest[idx, 0][0] < fvalueswitch[dim]: ls = '--' plt.plot([dataofinterest[idx, 0][0]]*2, (tmpy, ydata[0]), ls=ls, lw=linewidth, color=colors[i]) plt.plot(dataofinterest[idx, 0], ydata, ls='--', lw=linewidth, color=colors[i]) # marker for when the first algorithm stop plt.plot((dataofinterest[idx, 0][0], ), (ydata[0], ), marker='D', color=colors[i], lw=linewidth, markeredgecolor=colors[i], markerfacecolor='None', markeredgewidth=linewidth, markersize=3*linewidth) #Do not plot anything else if it happens after minfvalue if dataofinterest[idx, 0][-1] <= minfvalue: continue plt.plot((dataofinterest[idx, 0][-1], ), (ydata[-1], ), marker='D', color=colors[i], lw=linewidth, markeredgecolor=colors[i], markerfacecolor='None', markeredgewidth=linewidth, markersize=3*linewidth) if isBenchmarkinfosFound: plt.title(funInfos[func]) if func in (1, 24, 101, 130): plt.legend(loc='best') # save saveFigure(filename, figFormat=figformat, verbose=verbose) plt.close() #set_trace() plt.rcdefaults()
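# The figure above plots, per dimension, the ratio ERT1/ERT0 over a range of
# target values and draws the curve solid only where both ERTs stay below
# three times the respective algorithm's median number of evaluations. A
# schematic sketch with made-up ERT curves (all numbers are assumptions):
import numpy as np
import matplotlib.pyplot as plt

targets = np.power(10., np.arange(2, -9, -1))   # 1e+2 down to 1e-8
ert0 = 50. * targets ** -0.3                    # assumed ERT of algorithm 0
ert1 = 80. * targets ** -0.45                   # assumed ERT of algorithm 1
budget0 = budget1 = 2e4                         # assumed median max. evaluations

ratio = ert1 / ert0
reliable = (ert0 <= 3 * budget0) & (ert1 <= 3 * budget1)
plt.loglog(targets, ratio, ls='--', color='b')                     # full curve
plt.loglog(targets[reliable], ratio[reliable], ls='-', color='b')  # reliable part
plt.xlabel('target function value')
plt.ylabel('ERT1 / ERT0')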
def main(dsList0, dsList1, minfvalue=1e-8, outputdir='', verbose=True): """Returns ERT1/ERT0 comparison figure.""" #plt.rc("axes", labelsize=20, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) # minfvalue = pproc.TargetValues.cast(minfvalue) dictFun0 = dsList0.dictByFunc() dictFun1 = dsList1.dictByFunc() for func in set.intersection(set(dictFun0), set(dictFun1)): dictDim0 = dictFun0[func].dictByDim() dictDim1 = dictFun1[func].dictByDim() if isBenchmarkinfosFound: title = funInfos[func] else: title = '' filename = os.path.join(outputdir,'ppfig2_f%03d' % (func)) dims = sorted(set.intersection(set(dictDim0), set(dictDim1))) handles = [] dataperdim = {} fvalueswitch = {} nbtests = 0 for i, dim in enumerate(dimensions): try: entry0 = dictDim0[dim][0] entry1 = dictDim1[dim][0] except KeyError: continue nbtests += 1 # generateData: data = _generateData(entry0, entry1, fthresh=fthresh) dataperdim[dim] = data if len(data[0]) == 0 and len(data[1]) == 0: continue # TODO: hack, modify slightly so line goes to 'zero' if minfvalue: for d in data: tmp = d[:, 0] tmp[tmp == 0] = min(min(tmp[tmp > 0]), minfvalue)**2 # plot idx = np.isfinite(data[0][:, 1]) * np.isfinite(data[1][:, 1]) ydata = data[1][idx, 1]/data[0][idx, 1] kwargs = styles[i].copy() kwargs['label'] = '%2d-D' % dim tmp = plotUnifLogXMarkers(data[0][idx, 0], ydata, nbperdecade=1, logscale=True, **kwargs) plt.setp(tmp, markersize=3*linewidth) plt.setp(tmp[0], ls='--') # This is only one possibility: #idx = (data[0][:, 3] >= 5) * (data[1][:, 3] >= 5) idx = ((data[0][:, 1] <= 3 * np.median(entry0.maxevals)) * (data[1][:, 1] <= 3 * np.median(entry1.maxevals))) if not idx.any(): fvalueswitch[dim] = np.inf # Hack: fvalueswitch is the smallest value of f where the line # was still solid. continue fvalueswitch[dim] = min(data[0][idx, 0]) ydata = data[1][idx, 1]/data[0][idx, 1] tmp = plotUnifLogXMarkers(data[0][idx, 0], ydata, nbperdecade=1, logscale=True, **styles[i]) plt.setp(tmp[1], markersize=3*linewidth) beautify(xmin=minfvalue) #beautify() ax = plt.gca() # Freeze the boundaries ax.set_autoscale_on(False) #trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) # Plot everything else for i, dim in enumerate(dimensions): try: entry0 = dictDim0[dim][0] entry1 = dictDim1[dim][0] data = dataperdim[dim] except KeyError: continue if len(data[0]) == 0 and len(data[1]) == 0: continue # annotation annotate(entry0, entry1, dim, minfvalue, nbtests=nbtests) tmp0 = np.isfinite(data[0][:, 1]) tmp1 = np.isfinite(data[1][:, 1]) idx = tmp0 * tmp1 if not idx.any(): continue #Do not plot anything else if it happens after minfvalue if data[0][idx, 0][-1] <= minfvalue: # hack for the legend continue # Determine which algorithm went further algstoppedlast = 0 algstoppedfirst = 1 if np.sum(tmp0) < np.sum(tmp1): algstoppedlast = 1 algstoppedfirst = 0 #marker if an algorithm stopped ydata = data[1][idx, 1]/data[0][idx, 1] plt.plot((data[0][idx, 0][-1], ), (ydata[-1], ), marker='D', ls='', color=styles[i]['color'], markeredgecolor=styles[i]['color'], markerfacecolor=styles[i]['color'], markersize=4*linewidth) tmpy = ydata[-1] # plot probability of success line dataofinterest = data[algstoppedlast] tmp = np.nonzero(idx)[0][-1] # Why [0]? # add the last line for which both algorithm still have a success idx = (data[algstoppedfirst][:, 2] == 0.) * (dataofinterest[:, 2] > 0.) 
idx[tmp] = True if np.sum(idx) <= 1:#len(idx) == 0 or not idx.any(): continue ymin, ymax = plt.ylim() #orientation = -1 ybnd = ymin if algstoppedlast == 0: ybnd = ymax #orientation = 1 #ydata = orientation * dataofinterest[idx, 2] / 2 + 0.5 ydata = np.power(10, np.log10(ybnd) * (dataofinterest[idx, 2] -offset*(5-i)*np.log10(ymax/ymin)/np.abs(np.log10(ybnd)))) ls = '-' if dataofinterest[idx, 0][0] < fvalueswitch[dim]: ls = '--' tmp = plt.plot([dataofinterest[idx, 0][0]]*2, (tmpy, ydata[0]), **styles[i]) plt.setp(tmp, ls=ls, marker='') tmp = plt.plot((dataofinterest[idx, 0][0], ), (ydata[0], ), marker='D', ls='', color=styles[i]['color'], markeredgecolor=styles[i]['color'], markerfacecolor=styles[i]['color'], markersize=4*linewidth) kwargs = styles[i].copy() kwargs['ls'] = ls tmp = plotUnifLogXMarkers(dataofinterest[idx, 0], ydata, nbperdecade=1, logscale=True, **kwargs) plt.setp(tmp, markersize=3*linewidth) #Do not plot anything else if it happens after minfvalue if dataofinterest[idx, 0][-1] <= minfvalue: continue #plt.plot((dataofinterest[idx, 0][-1], ), (ydata[-1], ), marker='d', # color=styles[i]['color'], markeredgecolor=styles[i]['color'], # markerfacecolor=styles[i]['color'], markersize=4*linewidth) if isBenchmarkinfosFound: plt.title(funInfos[func]) if func in functions_with_legend: plt.legend(loc='best') # save saveFigure(filename, verbose=verbose) plt.close()
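# Both versions of this routine replace exact zeros in the target column so
# that the curve can still be drawn on a logarithmic axis: a zero is mapped
# strictly below the smallest positive value and below minfvalue. A standalone
# illustration of that substitution:
import numpy as np

minfvalue = 1e-8
fvalues = np.array([1e-2, 1e-5, 0., 1e-7])
fvalues[fvalues == 0] = min(min(fvalues[fvalues > 0]), minfvalue) ** 2
# -> the zero entry becomes 1e-16, i.e. below every plotted target value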
def main(dsList0, dsList1, outputdir, verbose=True): """Generate a scatter plot figure.""" #plt.rc("axes", labelsize=24, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) dictFunc0 = dsList0.dictByFunc() dictFunc1 = dsList1.dictByFunc() funcs = set(dictFunc0.keys()) & set(dictFunc1.keys()) for f in funcs: dictDim0 = dictFunc0[f].dictByDim() dictDim1 = dictFunc1[f].dictByDim() dims = set(dictDim0.keys()) & set(dictDim1.keys()) #set_trace() for i, d in enumerate(dimensions): try: entry0 = dictDim0[d][0] # should be only one element entry1 = dictDim1[d][0] # should be only one element except (IndexError, KeyError): continue xdata = numpy.array(entry0.detERT(targets)) ydata = numpy.array(entry1.detERT(targets)) tmp = (numpy.isinf(xdata) == False) * (numpy.isinf(ydata) == False) if tmp.any(): try: plt.plot(xdata[tmp], ydata[tmp], ls='', markersize=markersize, marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3) except KeyError: plt.plot(xdata[tmp], ydata[tmp], ls='', markersize=markersize, marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3) #try: # plt.scatter(xdata[tmp], ydata[tmp], s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3) #except ValueError: # set_trace() #ax = plt.gca() ax = plt.axes() tmp = numpy.isinf(xdata) * (numpy.isinf(ydata) == False) if tmp.any(): trans = blend(ax.transAxes, ax.transData) #plt.scatter([1.]*numpy.sum(tmp), ydata[tmp], s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot([1.] * numpy.sum(tmp), ydata[tmp], markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) except KeyError: plt.plot([1.] * numpy.sum(tmp), ydata[tmp], markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) #set_trace() tmp = (numpy.isinf(xdata) == False) * numpy.isinf(ydata) if tmp.any(): trans = blend(ax.transData, ax.transAxes) # plt.scatter(xdata[tmp], [1.-offset]*numpy.sum(tmp), s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot(xdata[tmp], [1. - offset] * numpy.sum(tmp), markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) except KeyError: plt.plot(xdata[tmp], [1. - offset] * numpy.sum(tmp), markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=trans, clip_on=False) tmp = numpy.isinf(xdata) * numpy.isinf(ydata) if tmp.any(): # plt.scatter(xdata[tmp], [1.-offset]*numpy.sum(tmp), s=10, marker=markers[i], # facecolor='None', edgecolor=colors[i], linewidth=3, # transform=trans) try: plt.plot([1. - offset] * numpy.sum(tmp), [1. - offset] * numpy.sum(tmp), markersize=markersize, ls='', marker=markers[i], markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=ax.transAxes, clip_on=False) except KeyError: plt.plot([1. - offset] * numpy.sum(tmp), [1. 
- offset] * numpy.sum(tmp), markersize=markersize, ls='', marker='x', markerfacecolor='None', markeredgecolor=colors[i], markeredgewidth=3, transform=ax.transAxes, clip_on=False) #set_trace() beautify() for i, d in enumerate(dimensions): try: entry0 = dictDim0[d][0] # should be only one element entry1 = dictDim1[d][0] # should be only one element except (IndexError, KeyError): continue minbnd, maxbnd = plt.xlim() plt.plot( (entry0.mMaxEvals(), entry0.mMaxEvals()), # (minbnd, entry1.mMaxEvals()), ls='-', color=colors[i], (max([minbnd, entry1.mMaxEvals() / 10.]), entry1.mMaxEvals()), ls='-', color=colors[i], zorder=-1) plt.plot( # (minbnd, entry0.mMaxEvals()), (max([minbnd, entry0.mMaxEvals() / 10.]), entry0.mMaxEvals()), (entry1.mMaxEvals(), entry1.mMaxEvals()), ls='-', color=colors[i], zorder=-1) plt.xlim(minbnd, maxbnd) plt.ylim(minbnd, maxbnd) #Set the boundaries again: they changed due to new plots. #plt.axvline(entry0.mMaxEvals(), ls='--', color=colors[i]) #plt.axhline(entry1.mMaxEvals(), ls='--', color=colors[i]) if isBenchmarkinfosFound: try: plt.ylabel(funInfos[f]) except IndexError: pass filename = os.path.join(outputdir, 'ppscatter_f%03d' % f) saveFigure(filename, verbose=verbose) plt.close()
def comp(dsList0, dsList1, targets, isStoringXMax=False, outputdir='', info='default', verbose=True): """Generate figures of ECDF that compare 2 algorithms. :param DataSetList dsList0: list of DataSet instances for ALG0 :param DataSetList dsList1: list of DataSet instances for ALG1 :param seq targets: target function values to be displayed :param bool isStoringXMax: if set to True, the first call :py:func:`beautifyFVD` sets the globals :py:data:`fmax` and :py:data:`maxEvals` and all subsequent calls will use these values as rightmost xlim in the generated figures. :param string outputdir: output directory (must exist) :param string info: string suffix for output file names. :param bool verbose: control verbosity """ # plt.rc("axes", labelsize=20, titlesize=24) # plt.rc("xtick", labelsize=20) # plt.rc("ytick", labelsize=20) # plt.rc("font", size=20) # plt.rc("legend", fontsize=20) if not isinstance(targets, pproc.RunlengthBasedTargetValues): targets = pproc.TargetValues.cast(targets) dictdim0 = dsList0.dictByDim() dictdim1 = dsList1.dictByDim() for d in set(dictdim0.keys()) & set(dictdim1.keys()): maxEvalsFactor = max(max(i.mMaxEvals() / d for i in dictdim0[d]), max(i.mMaxEvals() / d for i in dictdim1[d])) if isStoringXMax: global evalfmax else: evalfmax = None if not evalfmax: evalfmax = maxEvalsFactor**1.05 if runlen_xlimits_max is not None: evalfmax = runlen_xlimits_max filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info)) fig = plt.figure() for j in range(len(targets)): tmp = plotRLDistr(dictdim0[d], lambda fun_dim: targets(fun_dim)[j], targets.label(j) if isinstance( targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j), marker=genericsettings.line_styles[1]['marker'], **rldStyles[j % len(rldStyles)]) plt.setp(tmp[-1], label=None) # Remove automatic legend # Mods are added after to prevent them from appearing in the legend plt.setp(tmp, markersize=20., markeredgewidth=plt.getp(tmp[-1], 'linewidth'), markeredgecolor=plt.getp(tmp[-1], 'color'), markerfacecolor='none') tmp = plotRLDistr(dictdim1[d], lambda fun_dim: targets(fun_dim)[j], targets.label(j) if isinstance( targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j), marker=genericsettings.line_styles[0]['marker'], **rldStyles[j % len(rldStyles)]) # modify the automatic legend: remover marker and change text plt.setp(tmp[-1], marker='', label=targets.label(j) if isinstance( targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j)) # Mods are added after to prevent them from appearing in the legend plt.setp(tmp, markersize=15., markeredgewidth=plt.getp(tmp[-1], 'linewidth'), markeredgecolor=plt.getp(tmp[-1], 'color'), markerfacecolor='none') funcs = set(i.funcId for i in dictdim0[d]) | set(i.funcId for i in dictdim1[d]) text = 'f%s' % (consecutiveNumbers(sorted(funcs))) if not isinstance(targets, pproc.RunlengthBasedTargetValues): plot_previous_algorithms(d, funcs) else: plotRLB_previous_algorithms(d, funcs) # plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim0[d]), ls='--', color='k') # plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim1[d]), color='k') plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim0[d]), marker='+', markersize=20., color='k', markeredgewidth=plt.getp( tmp[-1], 'linewidth', )) plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim1[d]), marker='o', markersize=15., color='k', markerfacecolor='None', markeredgewidth=plt.getp(tmp[-1], 'linewidth')) plt.legend(loc='best') plt.text( 0.5, 0.98, text, horizontalalignment="center", verticalalignment="top", 
                 transform=plt.gca().transAxes)
        # bbox=dict(ec='k', fill=False),
        beautifyRLD(evalfmax)
        saveFigure(filename, verbose=verbose)
        plt.close(fig)
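# plotRLDistr above draws the empirical CDF of the runtimes (function
# evaluations divided by dimension) needed to reach a given target, where
# unsuccessful trials never contribute a step. A minimal sketch of such an
# ECDF from a list of per-trial runtimes (values are assumptions):
import numpy as np
import matplotlib.pyplot as plt

runtimes = np.array([120., 340., 560., np.inf, 90., np.inf])  # np.inf = failed
dim = 10
n_trials = len(runtimes)
x = np.sort(runtimes[np.isfinite(runtimes)]) / dim
y = np.arange(1, len(x) + 1) / float(n_trials)   # fraction of all trials solved
plt.step(x, y, where='post')
plt.xscale('log')
plt.xlabel('number of f-evaluations / dimension')
plt.ylabel('proportion of trials')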
def main(dsList0, dsList1, minfvalue=1e-8, outputdir='', verbose=True): """Returns ERT1/ERT0 comparison figure.""" #plt.rc("axes", labelsize=20, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) # minfvalue = pproc.TargetValues.cast(minfvalue) dictFun0 = dsList0.dictByFunc() dictFun1 = dsList1.dictByFunc() for func in set.intersection(set(dictFun0), set(dictFun1)): dictDim0 = dictFun0[func].dictByDim() dictDim1 = dictFun1[func].dictByDim() if isBenchmarkinfosFound: title = funInfos[func] else: title = '' filename = os.path.join(outputdir, 'ppfig2_f%03d' % (func)) dims = sorted(set.intersection(set(dictDim0), set(dictDim1))) handles = [] dataperdim = {} fvalueswitch = {} nbtests = 0 for i, dim in enumerate(dimensions): try: entry0 = dictDim0[dim][0] entry1 = dictDim1[dim][0] except KeyError: continue nbtests += 1 # generateData: data = _generateData(entry0, entry1, fthresh=fthresh) dataperdim[dim] = data if len(data[0]) == 0 and len(data[1]) == 0: continue # TODO: hack, modify slightly so line goes to 'zero' if minfvalue: for d in data: tmp = d[:, 0] tmp[tmp == 0] = min(min(tmp[tmp > 0]), minfvalue)**2 # plot idx = np.isfinite(data[0][:, 1]) * np.isfinite(data[1][:, 1]) ydata = data[1][idx, 1] / data[0][idx, 1] kwargs = styles[i].copy() kwargs['label'] = '%2d-D' % dim tmp = plotUnifLogXMarkers(data[0][idx, 0], ydata, nbperdecade=1, logscale=True, **kwargs) plt.setp(tmp, markersize=3 * linewidth) plt.setp(tmp[0], ls='--') # This is only one possibility: #idx = (data[0][:, 3] >= 5) * (data[1][:, 3] >= 5) idx = ((data[0][:, 1] <= 3 * np.median(entry0.maxevals)) * (data[1][:, 1] <= 3 * np.median(entry1.maxevals))) if not idx.any(): fvalueswitch[dim] = np.inf # Hack: fvalueswitch is the smallest value of f where the line # was still solid. continue fvalueswitch[dim] = min(data[0][idx, 0]) ydata = data[1][idx, 1] / data[0][idx, 1] tmp = plotUnifLogXMarkers(data[0][idx, 0], ydata, nbperdecade=1, logscale=True, **styles[i]) plt.setp(tmp[1], markersize=3 * linewidth) beautify(xmin=minfvalue) #beautify() ax = plt.gca() # Freeze the boundaries ax.set_autoscale_on(False) #trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) # Plot everything else for i, dim in enumerate(dimensions): try: entry0 = dictDim0[dim][0] entry1 = dictDim1[dim][0] data = dataperdim[dim] except KeyError: continue if len(data[0]) == 0 and len(data[1]) == 0: continue # annotation annotate(entry0, entry1, dim, minfvalue, nbtests=nbtests) tmp0 = np.isfinite(data[0][:, 1]) tmp1 = np.isfinite(data[1][:, 1]) idx = tmp0 * tmp1 if not idx.any(): continue #Do not plot anything else if it happens after minfvalue if data[0][idx, 0][-1] <= minfvalue: # hack for the legend continue # Determine which algorithm went further algstoppedlast = 0 algstoppedfirst = 1 if np.sum(tmp0) < np.sum(tmp1): algstoppedlast = 1 algstoppedfirst = 0 #marker if an algorithm stopped ydata = data[1][idx, 1] / data[0][idx, 1] plt.plot((data[0][idx, 0][-1], ), (ydata[-1], ), marker='D', ls='', color=styles[i]['color'], markeredgecolor=styles[i]['color'], markerfacecolor=styles[i]['color'], markersize=4 * linewidth) tmpy = ydata[-1] # plot probability of success line dataofinterest = data[algstoppedlast] tmp = np.nonzero(idx)[0][-1] # Why [0]? # add the last line for which both algorithm still have a success idx = (data[algstoppedfirst][:, 2] == 0.) * (dataofinterest[:, 2] > 0.) 
idx[tmp] = True if np.sum(idx) <= 1: #len(idx) == 0 or not idx.any(): continue ymin, ymax = plt.ylim() #orientation = -1 ybnd = ymin if algstoppedlast == 0: ybnd = ymax #orientation = 1 #ydata = orientation * dataofinterest[idx, 2] / 2 + 0.5 ydata = np.power( 10, np.log10(ybnd) * (dataofinterest[idx, 2] - offset * (5 - i) * np.log10(ymax / ymin) / np.abs(np.log10(ybnd)))) ls = '-' if dataofinterest[idx, 0][0] < fvalueswitch[dim]: ls = '--' tmp = plt.plot([dataofinterest[idx, 0][0]] * 2, (tmpy, ydata[0]), **styles[i]) plt.setp(tmp, ls=ls, marker='') tmp = plt.plot((dataofinterest[idx, 0][0], ), (ydata[0], ), marker='D', ls='', color=styles[i]['color'], markeredgecolor=styles[i]['color'], markerfacecolor=styles[i]['color'], markersize=4 * linewidth) kwargs = styles[i].copy() kwargs['ls'] = ls tmp = plotUnifLogXMarkers(dataofinterest[idx, 0], ydata, nbperdecade=1, logscale=True, **kwargs) plt.setp(tmp, markersize=3 * linewidth) #Do not plot anything else if it happens after minfvalue if dataofinterest[idx, 0][-1] <= minfvalue: continue #plt.plot((dataofinterest[idx, 0][-1], ), (ydata[-1], ), marker='d', # color=styles[i]['color'], markeredgecolor=styles[i]['color'], # markerfacecolor=styles[i]['color'], markersize=4*linewidth) if isBenchmarkinfosFound: plt.title(funInfos[func]) if func in functions_with_legend: plt.legend(loc='best') # save saveFigure(filename, verbose=verbose) plt.close()
def generateFigure(dsList, CrE=0., isStoringXRange=True, outputdir='.', info='default', verbose=True): """Generates ERT loss ratio figures. :param DataSetList dsList: input data set :param float CrE: crafting effort (see COCO documentation) :param bool isStoringXRange: if set to True, the first call to this function sets the global :py:data:`evalf` and all subsequent calls will use this value as boundaries in the generated figures. :param string outputdir: output folder (must exist) :param string info: string suffix for output file names :param bool verbose: controls verbosity """ #plt.rc("axes", labelsize=20, titlesize=24) #plt.rc("xtick", labelsize=20) #plt.rc("ytick", labelsize=20) #plt.rc("font", size=20) #plt.rc("legend", fontsize=20) if isStoringXRange: global evalf else: evalf = None # do not aggregate over dimensions for d, dsdim in dsList.dictByDim().iteritems(): maxevals = max(max(i.ert[numpy.isinf(i.ert) == False]) for i in dsdim) EVALS = [2. * d] EVALS.extend( numpy.power( 10., numpy.arange(1, numpy.floor(numpy.log10(maxevals * 1. / d)))) * d) if not evalf: evalf = (numpy.log10(EVALS[0] / d), numpy.log10(EVALS[-1] / d)) data = generateData(dsdim, EVALS, CrE) ydata = [] for i in range(len(EVALS)): #Aggregate over functions. ydata.append(numpy.log10(list(data[f][i] for f in data))) xdata = numpy.log10(numpy.array(EVALS) / d) xticklabels = [''] xticklabels.extend('%d' % i for i in xdata[1:]) plot(xdata, ydata) filename = os.path.join(outputdir, 'pplogloss_%02dD_%s' % (d, info)) plt.xticks(xdata, xticklabels) #Is there an upper bound? if CrE > 0 and len(set(dsdim.dictByFunc().keys())) >= 20: #TODO: hopefully this means we are not considering function groups. plt.text(0.01, 0.98, 'CrE = %5g' % CrE, fontsize=20, horizontalalignment='left', verticalalignment='top', transform=plt.gca().transAxes, bbox=dict(facecolor='w')) plt.axhline(1., color='k', ls='-', zorder=-1) plt.axvline(x=numpy.log10(max(i.mMaxEvals() / d for i in dsdim)), color='k') funcs = set(i.funcId for i in dsdim) if len(funcs) > 1: text = 'f%d-%d' % (min(funcs), max(funcs)) else: text = 'f%d' % (funcs.pop()) plt.text(0.5, 0.93, text, horizontalalignment="center", transform=plt.gca().transAxes) beautify() if evalf: plt.xlim(xmin=evalf[0] - 0.5, xmax=evalf[1] + 0.5) saveFigure(filename, verbose=verbose) #plt.show() plt.close()
def main(dictAlg, sortedAlgs=None, target=ftarget_default, outputdir='ppdata', verbose=True): """From a DataSetList, returns figures showing the scaling: ERT/dim vs dim. One function and one target per figure. ``target`` can be a scalar, a list with one element or a ``pproc.TargetValues`` instance with one target. ``sortedAlgs`` is a list of string-identifies (folder names) """ # target becomes a TargetValues "list" with one element target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target) latex_commands_filename = os.path.join(outputdir, 'bbob_pproc_commands.tex') assert isinstance(target, pproc.TargetValues) if len(target) != 1: raise ValueError('only a single target can be managed in ppfigs, ' + str(len(target)) + ' targets were given') dictFunc = pproc.dictAlgByFun(dictAlg) if sortedAlgs is None: sortedAlgs = sorted(dictAlg.keys()) if not os.path.isdir(outputdir): os.mkdir(outputdir) for f in dictFunc: filename = os.path.join(outputdir,'ppfigs_f%03d' % (f)) handles = [] fix_styles(len(sortedAlgs)) # for i, alg in enumerate(sortedAlgs): dictDim = dictFunc[f][alg].dictByDim() # this does not look like the most obvious solution #Collect data dimert = [] ert = [] dimnbsucc = [] ynbsucc = [] nbsucc = [] dimmaxevals = [] maxevals = [] dimmedian = [] medianfes = [] for dim in sorted(dictDim): assert len(dictDim[dim]) == 1 entry = dictDim[dim][0] data = generateData(entry, target((f, dim))[0]) # TODO: here we might want a different target for each function if 1 < 3 or data[2] == 0: # No success dimmaxevals.append(dim) maxevals.append(float(data[3])/dim) if data[2] > 0: dimmedian.append(dim) medianfes.append(data[4]/dim) dimert.append(dim) ert.append(float(data[0])/dim) if data[1] < 1.: dimnbsucc.append(dim) ynbsucc.append(float(data[0])/dim) nbsucc.append('%d' % data[2]) # Draw lines tmp = plt.plot(dimert, ert, **styles[i]) #label=alg, ) plt.setp(tmp[0], markeredgecolor=plt.getp(tmp[0], 'color')) # For legend # tmp = plt.plot([], [], label=alg.replace('..' 
+ os.sep, '').strip(os.sep), **styles[i]) tmp = plt.plot([], [], label=alg.split(os.sep)[-1], **styles[i]) plt.setp(tmp[0], markersize=12., markeredgecolor=plt.getp(tmp[0], 'color')) if dimmaxevals: tmp = plt.plot(dimmaxevals, maxevals, **styles[i]) plt.setp(tmp[0], markersize=20, #label=alg, markeredgecolor=plt.getp(tmp[0], 'color'), markeredgewidth=1, markerfacecolor='None', linestyle='None') handles.append(tmp) #tmp2 = plt.plot(dimmedian, medianfes, ls='', marker='+', # markersize=30, markeredgewidth=5, # markeredgecolor=plt.getp(tmp, 'color'))[0] #for i, n in enumerate(nbsucc): # plt.text(dimnbsucc[i], numpy.array(ynbsucc[i])*1.85, n, # verticalalignment='bottom', # horizontalalignment='center') if not bestalg.bestalgentries2009: bestalg.loadBBOB2009() bestalgdata = [] dimbestalg = list(df[0] for df in bestalg.bestalgentries2009 if df[1] == f) dimbestalg.sort() dimbestalg2 = [] for d in dimbestalg: entry = bestalg.bestalgentries2009[(d, f)] tmp = entry.detERT(target((f, d)))[0] if numpy.isfinite(tmp): bestalgdata.append(float(tmp)/d) dimbestalg2.append(d) tmp = plt.plot(dimbestalg2, bestalgdata, color=refcolor, linewidth=10, marker='d', markersize=25, markeredgecolor=refcolor, zorder=-1 #label='best 2009', ) handles.append(tmp) if show_significance: # plot significance-stars xstar, ystar = [], [] dims = sorted(pproc.dictAlgByDim(dictFunc[f])) for i, dim in enumerate(dims): datasets = pproc.dictAlgByDim(dictFunc[f])[dim] assert all([len(datasets[ialg]) == 1 for ialg in sortedAlgs if datasets[ialg]]) dsetlist = [datasets[ialg][0] for ialg in sortedAlgs if datasets[ialg]] if len(dsetlist) > 1: arzp, arialg = toolsstats.significance_all_best_vs_other(dsetlist, target((f, dim))) if arzp[0][1] * len(dims) < show_significance: ert = dsetlist[arialg[0]].detERT(target((f, dim)))[0] if ert < numpy.inf: xstar.append(dim) ystar.append(ert/dim) plt.plot(xstar, ystar, 'k*', markerfacecolor=None, markeredgewidth=2, markersize=0.5*styles[0]['markersize']) if funInfos: plt.gca().set_title(funInfos[f]) isLegend = False if legend: plotLegend(handles) elif 1 < 3: if f in (1, 24, 101, 130) and len(sortedAlgs) < 6: # 6 elements at most in the boxed legend isLegend = True beautify(legend=isLegend, rightlegend=legend) plt.text(plt.xlim()[0], plt.ylim()[0], 'target ' + target.label_name() + ': ' + target.label(0)) # TODO: check saveFigure(filename, verbose=verbose) plt.close() # generate commands in tex file: try: abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' alg_definitions = [] for i in range(len(sortedAlgs)): symb = r'{%s%s}' % (color_to_latex(styles[i]['color']), marker_to_latex(styles[i]['marker'])) alg_definitions.append((', ' if i > 0 else '') + '%s:%s' % (symb, '\\algorithm' + abc[i % len(abc)])) toolsdivers.prepend_to_file(latex_commands_filename, [#'\\providecommand{\\bbobppfigsftarget}{\\ensuremath{10^{%s}}}' # % target.loglabel(0), # int(numpy.round(numpy.log10(target))), '\\providecommand{\\bbobppfigslegend}[1]{', scaling_figure_caption(target), 'Legend: '] + alg_definitions + ['}'] ) toolsdivers.prepend_to_file(latex_commands_filename, ['\\providecommand{\\bbobECDFslegend}[1]{', ecdfs_figure_caption(target), '}'] ) if verbose: print 'Wrote commands and legend to %s' % filename # this is obsolete (however check templates) filename = os.path.join(outputdir,'ppfigs.tex') f = open(filename, 'w') f.write('% Do not modify this file: calls to post-processing software' + ' will overwrite any modification.\n') f.write('Legend: ') for i in range(0, len(sortedAlgs)): symb = r'{%s%s}' % 
                    (color_to_latex(styles[i]['color']),
                     marker_to_latex(styles[i]['marker']))
                f.write((', ' if i > 0 else '') + '%s:%s'
                        % (symb, writeLabels(sortedAlgs[i])))
            f.close()
            if verbose:
                print '(obsolete) Wrote legend in %s' % filename
        except IOError:
            raise

        handles.append(tmp)

        if funInfos:
            plt.gca().set_title(funInfos[f])

        beautify(rightlegend=legend)

        if legend:
            plotLegend(handles)
        else:
            if f in (1, 24, 101, 130):
                plt.legend()

        saveFigure(filename, figFormat=genericsettings.fig_formats,
                   verbose=verbose)
        plt.close()
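# The LaTeX legend written above associates the i-th algorithm with a command
# \algorithmA, \algorithmB, ... preceded by its color/marker symbol. A
# reduced, standalone sketch of that mapping; the *_stub helpers are mere
# placeholders for the module's own color_to_latex/marker_to_latex:
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
styles = [{'color': 'b', 'marker': 'o'}, {'color': 'r', 'marker': 's'}]

def color_to_latex_stub(color):
    return r'\color{%s}' % {'b': 'blue', 'r': 'red'}.get(color, 'black')

def marker_to_latex_stub(marker):
    return {'o': r'$\circ$', 's': r'$\Box$'}.get(marker, r'$\star$')

alg_definitions = []
for i in range(len(styles)):
    symb = r'{%s%s}' % (color_to_latex_stub(styles[i]['color']),
                        marker_to_latex_stub(styles[i]['marker']))
    alg_definitions.append((', ' if i > 0 else '')
                           + '%s:%s' % (symb, '\\algorithm' + abc[i % len(abc)]))
# ''.join(alg_definitions) ->
#   '{\color{blue}$\circ$}:\algorithmA, {\color{red}$\Box$}:\algorithmB'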
def main(dictAlg, order=None, outputdir='.', info='default', dimension=None, verbose=True): """Generates a figure showing the performance of algorithms. From a dictionary of :py:class:`DataSetList` sorted by algorithms, generates the cumulative distribution function of the bootstrap distribution of ERT for algorithms on multiple functions for multiple targets altogether. :param dict dictAlg: dictionary of :py:class:`DataSetList` instances one instance is equivalent to one algorithm, :param list targets: target function values :param list order: sorted list of keys to dictAlg for plotting order :param str outputdir: output directory :param str info: output file name suffix :param bool verbose: controls verbosity """ global x_limit # late assignment of default, because it can be set to None in config global divide_by_dimension # not fully implemented/tested yet if 'x_limit' not in globals() or x_limit is None: x_limit = x_limit_default tmp = pp.dictAlgByDim(dictAlg) # tmp = pp.DictAlg(dictAlg).by_dim() if len(tmp) != 1 and dimension is None: raise ValueError('We never integrate over dimension.') if dimension is not None: if dimension not in tmp.keys(): raise ValueError('dimension %d not in dictAlg dimensions %s' % (dimension, str(tmp.keys()))) tmp = {dimension: tmp[dimension]} dim = tmp.keys()[0] divisor = dim if divide_by_dimension else 1 algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []] dictFunc = pp.dictAlgByFun(dictAlg) # Collect data # Crafting effort correction: should we consider any? CrEperAlg = {} for alg in algorithms_with_data: CrE = 0. if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL': tmp = dictAlg[alg].dictByNoise() assert len(tmp.keys()) == 1 if tmp.keys()[0] == 'noiselessall': CrE = 0.5117 elif tmp.keys()[0] == 'nzall': CrE = 0.6572 CrEperAlg[alg] = CrE if CrE != 0.0: print 'Crafting effort for', alg, 'is', CrE dictData = {} # list of (ert per function) per algorithm dictMaxEvals = {} # list of (maxevals per function) per algorithm bestERT = [] # best ert per function # funcsolved = [set()] * len(targets) # number of functions solved per target xbest2009 = [] maxevalsbest2009 = [] for f, dictAlgperFunc in dictFunc.iteritems(): if function_IDs and f not in function_IDs: continue # print target_values((f, dim)) for j, t in enumerate(target_values((f, dim))): # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)): # funcsolved[j].add(f) for alg in algorithms_with_data: x = [np.inf] * perfprofsamplesize runlengthunsucc = [] try: entry = dictAlgperFunc[alg][ 0] # one element per fun and per dim. 
evals = entry.detEvals([t])[0] assert entry.dim == dim runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor if len(runlengthsucc) > 0: x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] except (KeyError, IndexError): #set_trace() warntxt = ( 'Data for algorithm %s on function %d in %d-D ' % (alg, f, dim) + 'are missing.\n') warnings.warn(warntxt) dictData.setdefault(alg, []).extend(x) dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc) if displaybest2009: #set_trace() if not bestalg.bestalgentries2009: bestalg.loadBBOB2009() bestalgentry = bestalg.bestalgentries2009[(dim, f)] bestalgevals = bestalgentry.detEvals(target_values((f, dim))) # print bestalgevals for j in range(len(bestalgevals[0])): if bestalgevals[1][j]: evals = bestalgevals[0][j] #set_trace() assert dim == bestalgentry.dim runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = bestalgentry.maxevals[ bestalgevals[1][j]][np.isnan(evals)] / divisor x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] else: x = perfprofsamplesize * [np.inf] runlengthunsucc = [] xbest2009.extend(x) maxevalsbest2009.extend(runlengthunsucc) if order is None: order = dictData.keys() # Display data lines = [] if displaybest2009: args = { 'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11., 'markeredgewidth': 1.5, 'markerfacecolor': refcolor, 'markeredgecolor': refcolor, 'color': refcolor, 'label': 'best 2009', 'zorder': -1 } lines.append( plotdata(np.array(xbest2009), x_limit, maxevalsbest2009, CrE=0., **args)) def algname_to_label(algname, dirname=None): """to be extended to become generally useful""" if isinstance(algname, (tuple, list)): # not sure this is needed return ' '.join([str(name) for name in algname]) return str(algname) for i, alg in enumerate(order): try: data = dictData[alg] maxevals = dictMaxEvals[alg] except KeyError: continue args = styles[(i) % len(styles)] args['linewidth'] = 1.5 args['markersize'] = 12. args['markeredgewidth'] = 1.5 args['markerfacecolor'] = 'None' args['markeredgecolor'] = args['color'] args['label'] = algname_to_label(alg) #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib #elif len(show_algorithms) > 0: #args['color'] = 'wheat' #args['ls'] = '-' #args['zorder'] = -1 # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... 
which does the work lines.append( plotdata(np.array(data), x_limit, maxevals, CrE=CrEperAlg[alg], **args)) labels, handles = plotLegend(lines, x_limit) if True: # isLateXLeg: fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info)) with open(fileName, 'w') as f: f.write(r'\providecommand{\nperfprof}{7}') algtocommand = {} # latex commands for i, alg in enumerate(order): tmp = r'\alg%sperfprof' % pptex.numtotext(i) f.write( r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' % (tmp, toolsdivers.str_to_latex( toolsdivers.strip_pathname2(algname_to_label(alg))))) algtocommand[algname_to_label(alg)] = tmp if displaybest2009: tmp = r'\algzeroperfprof' f.write(r'\providecommand{%s}{best 2009}' % (tmp)) algtocommand['best 2009'] = tmp commandnames = [] for label in labels: commandnames.append(algtocommand[label]) # f.write(headleg) if len( order ) > 28: # latex sidepanel won't work well for more than 25 algorithms, but original labels are also clipped f.write( r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}' % (commandnames[0], commandnames[-1])) else: fontsize_command = r'\tiny{}' if len(order) > 19 else '' f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}' % (fontsize_command, commandnames[0])) # TODO: check len(labels) > 0 for i in range(1, len(labels)): f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i]) f.write('}}\n') # f.write(footleg) if verbose: print 'Wrote right-hand legend in %s' % fileName figureName = os.path.join(outputdir, 'pprldmany_%s' % (info)) #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat) beautify() text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys()))) text += ',%d-D' % dim # TODO: this is strange when different dimensions are plotted plt.text(0.01, 0.98, text, horizontalalignment="left", verticalalignment="top", transform=plt.gca().transAxes) if len(dictFunc) == 1: plt.title(' '.join( (str(dictFunc.keys()[0]), genericsettings.current_testbed.short_names[dictFunc.keys()[0]]))) a = plt.gca() plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative) xticks, labels = plt.xticks() tmp = [] for i in xticks: tmp.append('%d' % round(np.log10(i))) a.set_xticklabels(tmp) if save_figure: ppfig.saveFigure(figureName, verbose=verbose) if len(dictFunc) == 1: ppfig.save_single_functions_html( os.path.join(outputdir, 'pprldmany'), '', # algorithms names are clearly visible in the figure add_to_names='_%02dD' % (dim), algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED) if close_figure: plt.close()
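# toolsstats.drawSP above bootstraps runtimes of a "simulated restart"
# algorithm: each sample adds up failed runs (costing their full budget)
# until a successful run is drawn. A schematic, self-contained sketch of that
# idea; this is not the library implementation and assumes at least one
# successful run:
import numpy as np

def simulated_restart_samples(succ_evals, unsucc_evals, samplesize=100,
                              rng=np.random):
    succ_evals = np.asarray(succ_evals, dtype=float)
    unsucc_evals = np.asarray(unsucc_evals, dtype=float)
    n_succ, n_unsucc = len(succ_evals), len(unsucc_evals)
    p_succ = n_succ / float(n_succ + n_unsucc)
    samples = []
    for _ in range(samplesize):
        total = 0.
        while rng.rand() > p_succ:                    # draw failed runs first
            total += unsucc_evals[rng.randint(n_unsucc)]
        total += succ_evals[rng.randint(n_succ)]      # end with one success
        samples.append(total)
    return np.array(samples)

# simulated_restart_samples([500., 800.], [2000.], samplesize=10)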
def main2(dsList0, dsList1, valuesOfInterest=None, outputdir='', info='default', verbose=True): """Generate figures of empirical cumulative distribution functions. Keyword arguments: indexEntries -- list of IndexEntry instances to process. valuesOfInterest -- target function values to be displayed. isStoringXMax -- if set to True, the first call BeautifyVD sets the globals fmax and maxEvals and all subsequent calls will use these values as rightmost xlim in the generated figures. -- if set to True, the first call BeautifyVD sets the global fmax and all subsequent call will have the same maximum xlim. outputdir -- output directory (must exist) info --- string suffix for output file names. Outputs: Image files of the empirical cumulative distribution functions. """ plt.rc("axes", labelsize=20, titlesize=24) plt.rc("xtick", labelsize=20) plt.rc("ytick", labelsize=20) plt.rc("font", size=20) plt.rc("legend", fontsize=20) figureName = os.path.join(outputdir, 'pplogabs_%s' % (info)) tmp = plotLogAbs2(dsList0, dsList1, valuesOfInterest, verbose=verbose) beautify2() # Prolong to the boundary xmin, xmax = plt.xlim() for i in tmp: try: xdata, ydata = i.get_data() except AttributeError: xdata = i.get_xdata() ydata = i.get_ydata() if len(xdata) == 0 or len(ydata) == 0: continue xdata = numpy.insert(xdata, 0, xmin) try: xdata = numpy.insert(xdata, len(xdata), xmax) except OverflowError: xdata = xdata + 0.0 # TODO: Hack for float conversion, compatibility with 0.8 xdata = numpy.insert(xdata, len(xdata), xmax) ydata = numpy.insert(ydata, 0, ydata[0]) ydata = numpy.insert(ydata, len(ydata), ydata[-1]) i.set_data(xdata, ydata) plt.legend(loc='best') #plt.text(0.5, 0.93, text, horizontalalignment="center", # transform=axisHandle.transAxes) funcs = set(dsList0.dictByFunc().keys()) & set(dsList1.dictByFunc().keys()) text = 'f%s' % consecutiveNumbers(sorted(funcs)) plt.text(0.98, 0.02, text, horizontalalignment="right", transform=plt.gca().transAxes) #set_trace() saveFigure(figureName, figFormat=figformat, verbose=verbose) plt.close() #set_trace() plt.rcdefaults()
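# The loop above prolongs each ECDF line to the current x-limits so that the
# steps do not end abruptly at the first and last data points. A minimal
# sketch of that prolongation for a single Line2D handle (artificial data):
import numpy as np
import matplotlib.pyplot as plt

line, = plt.step([10., 100., 1000.], [0.2, 0.5, 0.9], where='post')
plt.xscale('log')
plt.xlim(1., 1e4)
xmin, xmax = plt.xlim()
xdata, ydata = line.get_data()
xdata = np.concatenate(([xmin], xdata, [xmax]))
ydata = np.concatenate(([ydata[0]], ydata, [ydata[-1]]))
line.set_data(xdata, ydata)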
def main(dsList, isStoringXMax=False, outputdir='', info='default', verbose=True): """Generate figures of empirical cumulative distribution functions. This method has a feature which allows to keep the same boundaries for the x-axis, if ``isStoringXMax==True``. This makes sense when dealing with different functions or subsets of functions for one given dimension. CAVE: this is bug-prone, as some data depend on the maximum evaluations and the appearence therefore depends on the calling order. :param DataSetList dsList: list of DataSet instances to process. :param bool isStoringXMax: if set to True, the first call :py:func:`beautifyFVD` sets the globals :py:data:`fmax` and :py:data:`maxEvals` and all subsequent calls will use these values as rightmost xlim in the generated figures. :param string outputdir: output directory (must exist) :param string info: string suffix for output file names. :param bool verbose: control verbosity """ # plt.rc("axes", labelsize=20, titlesize=24) # plt.rc("xtick", labelsize=20) # plt.rc("ytick", labelsize=20) # plt.rc("font", size=20) # plt.rc("legend", fontsize=20) targets = single_target_values # convenience abbreviation for d, dictdim in dsList.dictByDim().iteritems(): maxEvalsFactor = max(i.mMaxEvals() / d for i in dictdim) if isStoringXMax: global evalfmax else: evalfmax = None if not evalfmax: evalfmax = maxEvalsFactor if runlen_xlimits_max is not None: evalfmax = runlen_xlimits_max # first figure: Run Length Distribution filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info)) fig = plt.figure() for j in range(len(targets)): plotRLDistr( dictdim, lambda fun_dim: targets(fun_dim)[j], targets.label(j) if isinstance( targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j), evalfmax, # can be larger maxEvalsFactor with no effect **rldStyles[j % len(rldStyles)]) funcs = list(i.funcId for i in dictdim) text = 'f%s' % (consecutiveNumbers(sorted(funcs))) text += ',%d-D' % d if (1): # try: if not isinstance(targets, pproc.RunlengthBasedTargetValues): # if targets.target_values[-1] == 1e-8: # this is a hack plot_previous_algorithms(d, funcs) else: plotRLB_previous_algorithms(d, funcs) # except: # pass plt.axvline(x=maxEvalsFactor, color='k') # vertical line at maxevals plt.legend(loc='best') plt.text(0.5, 0.98, text, horizontalalignment="center", verticalalignment="top", transform=plt.gca().transAxes # bbox=dict(ec='k', fill=False) ) try: # was never tested, so let's make it safe if len(funcs) == 1: plt.title(genericsettings.current_testbed.info(funcs[0])[:27]) except: warnings.warn('could not print title') beautifyRLD(evalfmax) saveFigure(filename, verbose=verbose) plt.close(fig) # second figure: Function Value Distribution filename = os.path.join(outputdir, 'ppfvdistr_%02dD_%s' % (d, info)) fig = plt.figure() plotFVDistr(dictdim, np.inf, 1e-8, **rldStyles[-1]) # coloring right to left for j, max_eval_factor in enumerate(single_runlength_factors): if max_eval_factor > maxEvalsFactor: break plotFVDistr(dictdim, max_eval_factor, 1e-8, **rldUnsuccStyles[j % len(rldUnsuccStyles)]) plt.text( 0.98, 0.02, text, horizontalalignment="right", transform=plt.gca().transAxes) # bbox=dict(ec='k', fill=False), beautifyFVD(isStoringXMax=isStoringXMax, ylabel=False) saveFigure(filename, verbose=verbose) plt.close(fig)
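# The annotation text 'f%s' % consecutiveNumbers(sorted(funcs)) compresses the
# sorted function ids into a compact range string. An illustrative
# reimplementation of such a formatter (not the library function itself):
def format_consecutive(numbers):
    """Collapse a sorted list of integers into an 'a-b, c' style string,
    e.g. [1, 2, 3, 5] -> '1-3, 5'."""
    parts, start, prev = [], None, None
    for n in numbers:
        if start is None:
            start = prev = n
        elif n == prev + 1:
            prev = n
        else:
            parts.append('%d-%d' % (start, prev) if start != prev else '%d' % start)
            start = prev = n
    if start is not None:
        parts.append('%d-%d' % (start, prev) if start != prev else '%d' % start)
    return ', '.join(parts)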
def main(dictAlg, sortedAlgs, target=1e-8, outputdir='ppdata', verbose=True):
    """From a dictionary of DataSetList instances, generate figures showing
    the scaling of ERT/dim versus dimension.

    One function and one target per figure.

    ``sortedAlgs`` is a list of string identifiers (folder names).

    """
    dictFunc = pproc.dictAlgByFun(dictAlg)

    for f in dictFunc:
        filename = os.path.join(outputdir, 'ppfigs_f%03d' % (f))
        handles = []
        fix_styles(len(sortedAlgs))
        for i, alg in enumerate(sortedAlgs):
            dictDim = dictFunc[f][alg].dictByDim()

            # Collect data
            dimert = []
            ert = []
            dimnbsucc = []
            ynbsucc = []
            nbsucc = []
            dimmaxevals = []
            maxevals = []
            dimmedian = []
            medianfes = []
            for dim in sorted(dictDim):
                assert len(dictDim[dim]) == 1
                entry = dictDim[dim][0]
                data = generateData(entry, target)  # TODO: here we might want a different target for each function
                if 1 < 3 or data[2] == 0:  # No success
                    dimmaxevals.append(dim)
                    maxevals.append(float(data[3]) / dim)
                if data[2] > 0:
                    dimmedian.append(dim)
                    medianfes.append(data[4] / dim)
                    dimert.append(dim)
                    ert.append(float(data[0]) / dim)
                    if data[1] < 1.:
                        dimnbsucc.append(dim)
                        ynbsucc.append(float(data[0]) / dim)
                        nbsucc.append('%d' % data[2])

            # Draw lines
            tmp = plt.plot(dimert, ert, **styles[i])  #label=alg, )
            plt.setp(tmp[0], markeredgecolor=plt.getp(tmp[0], 'color'))
            # For legend
            # tmp = plt.plot([], [], label=alg.replace('..' + os.sep, '').strip(os.sep), **styles[i])
            tmp = plt.plot([], [], label=alg.split(os.sep)[-1], **styles[i])
            plt.setp(tmp[0], markersize=12.,
                     markeredgecolor=plt.getp(tmp[0], 'color'))

            if dimmaxevals:
                tmp = plt.plot(dimmaxevals, maxevals, **styles[i])
                plt.setp(tmp[0], markersize=20,  #label=alg,
                         markeredgecolor=plt.getp(tmp[0], 'color'),
                         markeredgewidth=1,
                         markerfacecolor='None', linestyle='None')

            handles.append(tmp)
            #tmp2 = plt.plot(dimmedian, medianfes, ls='', marker='+',
            #                markersize=30, markeredgewidth=5,
            #                markeredgecolor=plt.getp(tmp, 'color'))[0]
            #for i, n in enumerate(nbsucc):
            #    plt.text(dimnbsucc[i], numpy.array(ynbsucc[i])*1.85, n,
            #             verticalalignment='bottom',
            #             horizontalalignment='center')

        if not bestalg.bestalgentries2009:
            bestalg.loadBBOB2009()
        bestalgdata = []
        dimbestalg = list(df[0] for df in bestalg.bestalgentries2009 if df[1] == f)
        dimbestalg.sort()
        dimbestalg2 = []
        for d in dimbestalg:
            entry = bestalg.bestalgentries2009[(d, f)]
            tmp = entry.detERT([target])[0]
            if numpy.isfinite(tmp):
                bestalgdata.append(float(tmp) / d)
                dimbestalg2.append(d)

        tmp = plt.plot(dimbestalg2, bestalgdata, color=refcolor, linewidth=10,
                       marker='d', markersize=25, markeredgecolor=refcolor,
                       zorder=-1
                       #label='best 2009',
                       )
        handles.append(tmp)

        if show_significance:  # plot significance-stars
            xstar, ystar = [], []
            dims = sorted(pproc.dictAlgByDim(dictFunc[f]))
            for i, dim in enumerate(dims):
                datasets = pproc.dictAlgByDim(dictFunc[f])[dim]
                assert all([len(datasets[ialg]) == 1 for ialg in sortedAlgs if datasets[ialg]])
                dsetlist = [datasets[ialg][0] for ialg in sortedAlgs if datasets[ialg]]
                if len(dsetlist) > 1:
                    arzp, arialg = toolsstats.significance_all_best_vs_other(dsetlist, [target])
                    if arzp[0][1] * len(dims) < 0.05:
                        ert = dsetlist[arialg[0]].detERT([target])[0]
                        if ert < numpy.inf:
                            xstar.append(dim)
                            ystar.append(ert / dim)
            plt.plot(xstar, ystar, 'k*', markerfacecolor=None,
                     markeredgewidth=2,
                     markersize=0.5 * styles[0]['markersize'])

        if funInfos:
            plt.gca().set_title(funInfos[f])

        isLegend = False
        if legend:
            plotLegend(handles)
        elif 1 < 3:
            if f in (1, 24, 101, 130) and len(sortedAlgs) < 6:
                # 6 elements at most in the boxed legend
                isLegend = True

        beautify(legend=isLegend, rightlegend=legend)
        plt.text(plt.xlim()[0], plt.ylim()[0], 'ftarget=%.0e' % target)
        saveFigure(filename, verbose=verbose)
        plt.close()

    # generate commands in tex file:
    try:
        abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        alg_definitions = []
        for i in range(len(sortedAlgs)):
            symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                                marker_to_latex(styles[i]['marker']))
            alg_definitions.append((', ' if i > 0 else '') + '%s:%s' %
                                   (symb, '\\algorithm' + abc[i % len(abc)]))
        filename = os.path.join(outputdir, 'bbob_pproc_commands.tex')
        toolsdivers.prepend_to_file(filename,
                                    ['\\providecommand{\\bbobppfigsftarget}{\\ensuremath{10^{%d}}}'
                                     % int(numpy.round(numpy.log10(target))),
                                     '\\providecommand{\\bbobppfigslegend}[1]{',
                                     scaling_figure_legend,
                                     'Legend: '] + alg_definitions + ['}'])
        if verbose:
            print('Wrote commands and legend to %s' % filename)

        # this is obsolete (however check templates)
        filename = os.path.join(outputdir, 'ppfigs.tex')
        f = open(filename, 'w')
        f.write('% Do not modify this file: calls to post-processing software'
                + ' will overwrite any modification.\n')
        f.write('Legend: ')
        for i in range(0, len(sortedAlgs)):
            symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                                marker_to_latex(styles[i]['marker']))
            f.write((', ' if i > 0 else '') + '%s:%s' % (symb, writeLabels(sortedAlgs[i])))
        f.close()
        if verbose:
            print('(obsolete) Wrote legend in %s' % filename)
    except IOError:
        raise
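# For orientation only: a self-contained sketch of the ERT/dim-versus-dim
# scaling plot that main() above assembles from DataSetList entries. All
# values below are made up for illustration; each curve corresponds to one
# algorithm and lower curves are better.
import matplotlib.pyplot as plt

if __name__ == '__main__':
    dims = [2, 3, 5, 10, 20, 40]
    # hypothetical ERT values (number of function evaluations) at the target
    ert_alg_a = [1.2e2, 3.0e2, 9.1e2, 4.0e3, 1.8e4, 9.5e4]
    ert_alg_b = [2.5e2, 6.0e2, 2.2e3, 1.1e4, 6.0e4, 3.1e5]
    for label, ert in (('algorithm A', ert_alg_a), ('algorithm B', ert_alg_b)):
        plt.plot(dims, [e / d for e, d in zip(ert, dims)], marker='o', label=label)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('dimension')
    plt.ylabel('ERT / dimension')
    plt.legend(loc='best')
    plt.show()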