def build(dictAlg, sortedAlg=None):
    """Merge datasets in an algorithm portfolio.

    :param dict dictAlg: dictionary of data sets with algorithm name
                         for keys
    :param seq sortedAlg: sequence for sorting the entries of
                          :py:data:`dictAlg`; if not provided,
                          ``dictAlg.keys()`` is used instead
    :returns: an instance of :py:class:`DataSetList` with the portfolio
              data sets

    """
    if not sortedAlg:
        sortedAlg = dictAlg.keys()
    tmpres = []
    for f, i in pp.dictAlgByFun(dictAlg).iteritems():
        for d, j in pp.dictAlgByDim(i).iteritems():
            tmp = []
            if sortedAlg:
                tmplist = list(j[k] for k in sortedAlg)
            else:
                tmplist = j.values()
            for k in tmplist:
                assert len(k) == 1  # one-element list
                tmp.append(k[0])
            try:
                tmpres.append(DataSet(tmp))
            except Usage, err:
                print >> sys.stderr, err.msg
    return DataSetList(tmpres)  # the return value announced in the docstring
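# Usage sketch (illustrative, not part of the original module): `dictAlg`
# is assumed to come from pproc.processInputArgs, mapping algorithm names to
# DataSetList instances, and `pp` is assumed to alias the pproc module as in
# build() above; the folder names below are hypothetical.
def _demo_build(folders=('AMALGAM', 'BFGS')):
    dsList, sortedAlgs, dictAlg = pp.processInputArgs(list(folders),
                                                      verbose=True)
    return build(dictAlg, sortedAlg=sortedAlgs)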
def generate(dictalg):
    """Generates a dictionary of best algorithm data sets.

    """
    # dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)
    res = {}
    for f, i in pproc.dictAlgByFun(dictalg).iteritems():
        for d, j in pproc.dictAlgByDim(i).iteritems():
            tmp = BestAlgSet(j)
            res[(d, f)] = tmp
    return res
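# Usage sketch (illustrative, not part of the original module): the mapping
# returned by generate() is keyed by (dimension, function) tuples, following
# the assignment res[(d, f)] above; the concrete pair below is hypothetical.
def _demo_generate(dictalg):
    res = generate(dictalg)
    return res[(5, 13)]  # BestAlgSet for f13 in 5-D, assuming the data exist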
def main(argv=None):
    """Generate best algorithm data sets from the module-level algorithm
    list ``algs`` and pickle them, one file per (function, dimension) pair
    plus one file for the complete dictionary.

    """
    if argv is None:
        argv = sys.argv[1:]
    try:
        try:
            opts, args = getopt.getopt(argv, "h", ["help"])
        except getopt.error, msg:
            raise Usage(msg)

        args = algs  # the module-level algorithm list overrides argv
        # if not (args):
        #     usage()
        #     sys.exit()

        outputdir = 'bestAlg'
        verbose = True

        # Process options
        for o, a in opts:
            if o in ("-h", "--help"):
                usage()
                sys.exit()
            else:
                assert False, "unhandled option"

        dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)
        # set_trace()

        if not os.path.exists(outputdir):
            os.mkdir(outputdir)
            if verbose:
                print 'Folder %s was created.' % (outputdir)

        res = {}
        for f, i in dictAlgByFun(dictAlg).iteritems():
            for d, j in dictAlgByDim(i).iteritems():
                tmp = BestAlgSet(j)
                picklefilename = os.path.join(
                    outputdir, 'bestalg_f%03d_%02d.pickle' % (f, d))
                fid = open(picklefilename, 'wb')  # binary mode for protocol 2
                pickle.dump(tmp, fid, 2)
                fid.close()
                res[(d, f)] = tmp
        picklefilename = os.path.join(outputdir, 'bestalg.pickle')
        fid = open(picklefilename, 'wb')  # binary mode for protocol 2
        pickle.dump(res, fid, 2)
        fid.close()

    except Usage, err:
        print >> sys.stderr, err.msg
        print >> sys.stderr, "for help use -h or --help"
        return 2
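# Illustrative counterpart to the pickling above (not part of the original
# module): reload the best-algorithm dictionary written by main(); the path
# matches the default outputdir used there.
def _demo_load_bestalg():
    fid = open(os.path.join('bestAlg', 'bestalg.pickle'), 'rb')
    try:
        return pickle.load(fid)  # dictionary keyed by (dimension, function)
    finally:
        fid.close()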
def all_single_functions(dictAlg, sortedAlgs=None, outputdir='.', verbose=0):
    dictFG = pp.dictAlgByFun(dictAlg)
    for fg, tmpdictAlg in dictFG.iteritems():
        dictDim = pp.dictAlgByDim(tmpdictAlg)
        for d, entries in dictDim.iteritems():
            single_fct_output_dir = (outputdir.rstrip(os.sep) + os.sep
                                     + 'pprldmany-single-functions'
                                     # + os.sep + ('f%03d' % fg)
                                     )
            if not os.path.exists(single_fct_output_dir):
                os.makedirs(single_fct_output_dir)
            main(entries,
                 order=sortedAlgs,
                 outputdir=single_fct_output_dir,
                 info=('f%03d_%02dD' % (fg, d)),
                 verbose=verbose)
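# Usage sketch (illustrative, not part of the original module): write the
# per-function runlength-distribution figures below './ppdata'; dictAlg and
# sortedAlgs are assumed to come from pproc.processInputArgs.
def _demo_all_single_functions(dictAlg, sortedAlgs):
    all_single_functions(dictAlg, sortedAlgs=sortedAlgs,
                         outputdir='ppdata', verbose=0)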
def main(dictAlg, order=None, outputdir='.', info='default',
         dimension=None, verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one instance is equivalent to one algorithm
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param int dimension: dimension to process; can remain None if
                          dictAlg contains one dimension only
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config
    global divide_by_dimension  # not fully implemented/tested yet
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1 and dimension is None:
        raise ValueError('We never integrate over dimension.')
    if dimension is not None:
        if dimension not in tmp.keys():
            raise ValueError('dimension %d not in dictAlg dimensions %s'
                             % (dimension, str(tmp.keys())))
        tmp = {dimension: tmp[dimension]}
    dim = tmp.keys()[0]
    divisor = dim if divide_by_dimension else 1

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0:
            print 'Crafting effort for', alg, 'is', CrE

    dictData = {}  # list of (ert per function) per algorithm
    dictMaxEvals = {}  # list of (maxevals per function) per algorithm
    bestERT = []  # best ert per function
    # funcsolved = [set()] * len(targets)  # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []

    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))

        for j, t in enumerate(target_values((f, dim))):
            # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)

            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0]  # one element per fun and per dim
                    evals = entry.detEvals([t])[0]
                    assert entry.dim == dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                              percentiles=[50],
                                              samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    # set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                               % (alg, f, dim) + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        if displaybest2009:
            # set_trace()
            if not bestalg.bestalgentries2009:
                bestalg.loadBBOB2009()
            bestalgentry = bestalg.bestalgentries2009[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    # set_trace()
                    assert dim == bestalgentry.dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = bestalgentry.maxevals[
                        bestalgevals[1][j]][np.isnan(evals)] / divisor
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                          percentiles=[50],
                                          samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)

    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    def algname_to_label(algname, dirname=None):
        """to be extended to become generally useful"""
        if isinstance(algname, (tuple, list)):  # not sure this is needed
            return ' '.join([str(name) for name in algname])
        return str(algname)

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[(i) % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = algname_to_label(alg)
        # args['markevery'] = perfprofsamplesize  # option available in latest version of matplotlib
        # elif len(show_algorithms) > 0:
        #     args['color'] = 'wheat'
        #     args['ls'] = '-'
        #     args['zorder'] = -1
        # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog...
        # which does the work
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)
    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}  # latex commands
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}'
                        % (tmp, toolsdivers.str_to_latex(
                            toolsdivers.strip_pathname2(algname_to_label(alg)))))
                algtocommand[algname_to_label(alg)] = tmp
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            commandnames = []
            for label in labels:
                commandnames.append(algtocommand[label])
            # f.write(headleg)
            if len(order) > 28:
                # latex sidepanel won't work well for more than 25 algorithms,
                # but original labels are also clipped
                f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}'
                        % (commandnames[0], commandnames[-1]))
            else:
                fontsize_command = r'\tiny{}' if len(order) > 19 else ''
                f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}'
                        % (fontsize_command, commandnames[0]))  # TODO: check len(labels) > 0
                for i in range(1, len(labels)):
                    f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
                f.write('}}\n')
            # f.write(footleg)
            if verbose:
                print 'Wrote right-hand legend in %s' % fileName

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    # beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys())))
    text += ',%d-D' % dim  # TODO: this is strange when different dimensions are plotted
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)
    if len(dictFunc) == 1:
        plt.title(' '.join((str(dictFunc.keys()[0]),
                            genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))

    a = plt.gca()
    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    if save_figure:
        ppfig.saveFigure(figureName, verbose=verbose)
        if len(dictFunc) == 1:
            ppfig.save_single_functions_html(
                os.path.join(outputdir, 'pprldmany'),
                '',  # algorithm names are clearly visible in the figure
                add_to_names='_%02dD' % (dim),
                algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED)
    if close_figure:
        plt.close()
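# Illustrative sketch (not part of the original module) of the
# success/failure split used in main() above: detEvals returns, per target,
# one entry per trial where NaN marks a trial that never reached the target;
# successful trials keep their evaluation count, unsuccessful ones
# contribute their trial's maxevals, both optionally divided by dimension.
import numpy as np

def _split_runlengths(evals, maxevals, divisor=1):
    # evals: 1-D array from entry.detEvals([t])[0]; maxevals: entry.maxevals
    succ = evals[np.isnan(evals) == False] / divisor
    unsucc = maxevals[np.isnan(evals)] / divisor
    return succ, unsucc  # the two inputs to toolsstats.drawSP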
plt.rc("axes", **inset.rcaxes)
plt.rc("xtick", **inset.rctick)
plt.rc("ytick", **inset.rctick)
plt.rc("font", **inset.rcfont)
plt.rc("legend", **inset.rclegend)

# convergence plots
if isConv:
    ppconverrorbars.main(dictAlg, outputdir, verbose)

# Performance profiles
if isPer:
    config.config()
    # ECDFs per noise groups
    dictNoi = pproc.dictAlgByNoi(dictAlg)
    for ng, tmpdictAlg in dictNoi.iteritems():
        dictDim = pproc.dictAlgByDim(tmpdictAlg)
        for d, entries in dictDim.iteritems():
            # pprldmany.main(entries, inset.summarized_target_function_values,
            # from . import config
            # config.config()
            pprldmany.main(entries,  # pass expensive flag here?
                           order=sortedAlgs,
                           outputdir=outputdir,
                           info=('%02dD_%s' % (d, ng)),
                           verbose=verbose)
    # ECDFs per function groups
    dictFG = pproc.dictAlgByFuncGroup(dictAlg)
    for fg, tmpdictAlg in dictFG.iteritems():
        dictDim = pproc.dictAlgByDim(tmpdictAlg)
        for d, entries in dictDim.iteritems():
            # the excerpt broke off mid-call here; the keyword arguments
            # below are completed by analogy with the noise-group loop above
            pprldmany.main(entries,
                           order=sortedAlgs,
                           outputdir=outputdir,
                           info=('%02dD_%s' % (d, fg)),
                           verbose=verbose)
def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default',
         dimension=None, verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one instance is equivalent to one algorithm
    :param bool isBiobjective: if True, the single-objective 'best 2009'
                               reference data are not displayed
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param int dimension: dimension to process; can remain None if
                          dictAlg contains one dimension only
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config
    global divide_by_dimension  # not fully implemented/tested yet
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1 and dimension is None:
        raise ValueError('We never integrate over dimension.')
    if dimension is not None:
        if dimension not in tmp.keys():
            raise ValueError('dimension %d not in dictAlg dimensions %s'
                             % (dimension, str(tmp.keys())))
        tmp = {dimension: tmp[dimension]}
    dim = tmp.keys()[0]
    divisor = dim if divide_by_dimension else 1

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0:
            print 'Crafting effort for', alg, 'is', CrE

    dictData = {}  # list of (ert per function) per algorithm
    dictMaxEvals = {}  # list of (maxevals per function) per algorithm
    bestERT = []  # best ert per function
    # funcsolved = [set()] * len(targets)  # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []

    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))

        for j, t in enumerate(target_values((f, dim))):
            # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)

            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0]  # one element per fun and per dim
                    evals = entry.detEvals([t])[0]
                    assert entry.dim == dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                              percentiles=[50],
                                              samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    # set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                               % (alg, f, dim) + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        displaybest2009 = not isBiobjective  # disabled until we find the bug
        if displaybest2009:
            # set_trace()
            bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)
            bestalgentry = bestalgentries[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    # set_trace()
                    assert dim == bestalgentry.dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = bestalgentry.maxevals[
                        bestalgevals[1][j]][np.isnan(evals)] / divisor
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                          percentiles=[50],
                                          samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)

    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    def algname_to_label(algname, dirname=None):
        """to be extended to become generally useful"""
        if isinstance(algname, (tuple, list)):  # not sure this is needed
            return ' '.join([str(name) for name in algname])
        return str(algname)

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[(i) % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = algname_to_label(alg)
        # args['markevery'] = perfprofsamplesize  # option available in latest version of matplotlib
        # elif len(show_algorithms) > 0:
        #     args['color'] = 'wheat'
        #     args['ls'] = '-'
        #     args['zorder'] = -1
        # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog...
        # which does the work
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)
    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}  # latex commands
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}'
                        % (tmp, toolsdivers.str_to_latex(
                            toolsdivers.strip_pathname2(algname_to_label(alg)))))
                algtocommand[algname_to_label(alg)] = tmp
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            commandnames = []
            for label in labels:
                commandnames.append(algtocommand[label])
            # f.write(headleg)
            if len(order) > 28:
                # latex sidepanel won't work well for more than 25 algorithms,
                # but original labels are also clipped
                f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}'
                        % (commandnames[0], commandnames[-1]))
            else:
                fontsize_command = r'\tiny{}' if len(order) > 19 else ''
                f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}'
                        % (fontsize_command, commandnames[0]))  # TODO: check len(labels) > 0
                for i in range(1, len(labels)):
                    f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
                f.write('}}\n')
            # f.write(footleg)
            if verbose:
                print 'Wrote right-hand legend in %s' % fileName

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    # beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f')
    text += ',%d-D' % dim  # TODO: this is strange when different dimensions are plotted
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)
    if len(dictFunc) == 1:
        plt.title(' '.join((str(dictFunc.keys()[0]),
                            genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))

    a = plt.gca()
    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    if save_figure:
        ppfig.saveFigure(figureName, verbose=verbose)
        if len(dictFunc) == 1:
            ppfig.save_single_functions_html(
                os.path.join(outputdir, 'pprldmany'),
                '',  # algorithm names are clearly visible in the figure
                add_to_names='_%02dD' % (dim),
                algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED)
    if close_figure:
        plt.close()
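# Illustrative sketch (not part of the original module): the reference-data
# lookup above switches on isBiobjective; loadBestAlgorithm and the
# (dimension, function) key layout are taken from the code above.
def _demo_reference_entry(isBiobjective, dim, f):
    bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)
    return bestalgentries[(dim, f)]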
def main(dictAlg, order=None, outputdir='.', info='default', verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one instance is equivalent to one algorithm
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()
    if len(tmp) != 1:
        raise Exception('We never integrate over dimension.')
    dim = tmp.keys()[0]

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0:
            print 'Crafting effort for', alg, 'is', CrE

    dictData = {}  # list of (ert per function) per algorithm
    dictMaxEvals = {}  # list of (maxevals per function) per algorithm
    bestERT = []  # best ert per function
    # funcsolved = [set()] * len(targets)  # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []

    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))

        for j, t in enumerate(target_values((f, dim))):
            # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)

            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0]  # one element per fun and per dim
                    evals = entry.detEvals([t])[0]
                    runlengthsucc = evals[np.isnan(evals) == False] / entry.dim
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / entry.dim
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                              percentiles=[50],
                                              samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    # set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                               % (alg, f, dim) + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        if displaybest2009:
            # set_trace()
            if not bestalg.bestalgentries2009:
                bestalg.loadBBOB2009()
            bestalgentry = bestalg.bestalgentries2009[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    # set_trace()
                    runlengthsucc = evals[np.isnan(evals) == False] / bestalgentry.dim
                    runlengthunsucc = bestalgentry.maxevals[
                        bestalgevals[1][j]][np.isnan(evals)] / bestalgentry.dim
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                          percentiles=[50],
                                          samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)

    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[(i) % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = alg
        # args['markevery'] = perfprofsamplesize  # option available in latest version of matplotlib
        # elif len(show_algorithms) > 0:
        #     args['color'] = 'wheat'
        #     args['ls'] = '-'
        #     args['zorder'] = -1
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)
    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        try:
            f = open(fileName, 'w')
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}'
                        % (tmp, toolsdivers.str_to_latex(
                            toolsdivers.strip_pathname2(alg))))
                algtocommand[alg] = tmp
            commandnames = []
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp
            for l in labels:
                commandnames.append(algtocommand[l])
            # f.write(headleg)
            f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}'
                    % commandnames[0])  # TODO: check len(labels) > 0
            for i in range(1, len(labels)):
                f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
            f.write('}\n')
            # f.write(footleg)
            if verbose:
                print 'Wrote right-hand legend in %s' % fileName
        except:
            raise  # TODO: Does this make sense?
        else:
            f.close()

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    # beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys())))
    text += ',%d-D' % dim
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)

    a = plt.gca()
    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    ppfig.saveFigure(figureName, verbose=verbose)

    plt.close()
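# Illustrative sketch (not part of the original module) of the x-axis
# relabeling done at the end of main(): each tick on the log-scaled
# runlength axis is replaced by its base-10 exponent; assumes the current
# axes already carry positive tick positions, and plt/np from the module.
def _demo_log_ticklabels():
    a = plt.gca()
    xticks, _ = plt.xticks()
    a.set_xticklabels(['%d' % round(np.log10(i)) for i in xticks])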
def extractBestAlgorithms(args=algs2009, f_factor=2,
                          target_lb=1e-8, target_ub=1e22):
    """Returns (and prints), per dimension, a list of algorithms within
    algorithm list args that contains an algorithm if, for any
    dimension/target/function pair, this algorithm:
    - is the best algorithm wrt ERT
    - has its own ERT within a factor f_factor of the best ERT
    - is the second best while no other algorithm lies within a factor
      f_factor of the best ERT

    """
    # TODO: use pproc.TargetValues class as input target values
    # default target values:
    targets = pproc.TargetValues(
        10**np.arange(np.log10(max((1e-8, target_lb))),
                      np.log10(target_ub) + 1e-9, 0.2))
    # there should be a simpler way to express this to become the
    # interface of this function

    print 'Loading algorithm data from given algorithm list...\n'
    verbose = True
    dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)

    print 'This may take a while (depending on the number of algorithms)'

    selectedAlgsPerProblem = {}
    for f, i in pproc.dictAlgByFun(dictAlg).iteritems():
        for d, j in pproc.dictAlgByDim(i).iteritems():
            selectedAlgsPerProblemDF = []
            best = BestAlgSet(j)
            for i in range(0, len(best.target)):
                t = best.target[i]
                # if ((t <= target_ub) and (t >= target_lb)):
                if toolsstats.in_approximately(
                        t, targets((f, d), discretize=True)):
                    # add best for this target:
                    selectedAlgsPerProblemDF.append(best.algs[i])

                    # add second best or all algorithms that have an ERT
                    # within a factor of f_factor of the best:
                    secondbest_ERT = np.infty
                    secondbest_str = ''
                    secondbest_included = False
                    for astring in j:
                        currdictalg = dictAlg[astring].dictByDim()
                        if currdictalg.has_key(d):
                            curralgdata = currdictalg[d][f - 1]
                            currERT = curralgdata.detERT([t])[0]
                            if (astring != best.algs[i]):
                                if (currERT < secondbest_ERT):
                                    secondbest_ERT = currERT
                                    secondbest_str = astring
                                if (currERT <= best.detERT([t])[0] * f_factor):
                                    selectedAlgsPerProblemDF.append(astring)
                                    secondbest_included = True
                    if not (secondbest_included) and (secondbest_str != ''):
                        selectedAlgsPerProblemDF.append(secondbest_str)

            if len(selectedAlgsPerProblemDF) > 0:
                selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF

        print 'pre-processing of function', f, 'done.'

    print 'loading of best algorithm(s) data done.'

    countsperalgorithm = {}
    for (d, f) in selectedAlgsPerProblem:
        print 'dimension:', d, ', function:', f
        setofalgs = set(selectedAlgsPerProblem[d, f])
        # now count how often algorithm a is best for the extracted targets
        for a in setofalgs:
            # use setdefault to initialize with zero if an entry does not exist:
            countsperalgorithm.setdefault((d, a), 0)
            countsperalgorithm[(d, a)] += selectedAlgsPerProblem[d, f].count(a)

    selectedalgsperdimension = {}
    for (d, a) in sorted(countsperalgorithm):
        if not selectedalgsperdimension.has_key(d):
            selectedalgsperdimension[d] = []
        selectedalgsperdimension[d].append((countsperalgorithm[(d, a)], a))

    for d in sorted(selectedalgsperdimension):
        print d, 'D:'
        for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):
            print count, alg
        print '\n'

    print " done."

    return selectedalgsperdimension
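# Usage sketch (illustrative, not part of the original module): the mapping
# returned above is keyed by dimension and holds (count, algorithm) pairs;
# this prints the three most frequent contributors per dimension, relying
# on the module-level algs2009 default.
def _demo_extract_best():
    selected = extractBestAlgorithms(f_factor=2)
    for d in sorted(selected):
        print d, 'D:', sorted(selected[d], reverse=True)[:3]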
def main(dictAlg, sortedAlgs=None, target=ftarget_default,
         outputdir='ppdata', verbose=True):
    """From a DataSetList, returns figures showing the scaling: ERT/dim vs dim.

    One function and one target per figure.

    ``target`` can be a scalar, a list with one element, or a
    ``pproc.TargetValues`` instance with one target.

    ``sortedAlgs`` is a list of string identifiers (folder names).

    """
    # target becomes a TargetValues "list" with one element
    target = pproc.TargetValues.cast([target] if numpy.isscalar(target)
                                     else target)
    latex_commands_filename = os.path.join(outputdir,
                                           'bbob_pproc_commands.tex')
    assert isinstance(target, pproc.TargetValues)
    if len(target) != 1:
        raise ValueError('only a single target can be managed in ppfigs, '
                         + str(len(target)) + ' targets were given')

    dictFunc = pproc.dictAlgByFun(dictAlg)
    if sortedAlgs is None:
        sortedAlgs = sorted(dictAlg.keys())
    if not os.path.isdir(outputdir):
        os.mkdir(outputdir)
    for f in dictFunc:
        filename = os.path.join(outputdir, 'ppfigs_f%03d' % (f))
        handles = []
        fix_styles(len(sortedAlgs))
        for i, alg in enumerate(sortedAlgs):
            dictDim = dictFunc[f][alg].dictByDim()  # this does not look like the most obvious solution

            # Collect data
            dimert = []
            ert = []
            dimnbsucc = []
            ynbsucc = []
            nbsucc = []
            dimmaxevals = []
            maxevals = []
            dimmedian = []
            medianfes = []
            for dim in sorted(dictDim):
                assert len(dictDim[dim]) == 1
                entry = dictDim[dim][0]
                data = generateData(entry, target((f, dim))[0])  # TODO: here we might want a different target for each function
                if 1 < 3 or data[2] == 0:  # No success
                    dimmaxevals.append(dim)
                    maxevals.append(float(data[3]) / dim)
                if data[2] > 0:
                    dimmedian.append(dim)
                    medianfes.append(data[4] / dim)
                    dimert.append(dim)
                    ert.append(float(data[0]) / dim)
                    if data[1] < 1.:
                        dimnbsucc.append(dim)
                        ynbsucc.append(float(data[0]) / dim)
                        nbsucc.append('%d' % data[2])

            # Draw lines
            tmp = plt.plot(dimert, ert, **styles[i])  # label=alg, )
            plt.setp(tmp[0], markeredgecolor=plt.getp(tmp[0], 'color'))
            # For legend
            # tmp = plt.plot([], [], label=alg.replace('..' + os.sep, '').strip(os.sep), **styles[i])
            tmp = plt.plot([], [], label=alg.split(os.sep)[-1], **styles[i])
            plt.setp(tmp[0], markersize=12.,
                     markeredgecolor=plt.getp(tmp[0], 'color'))
            if dimmaxevals:
                tmp = plt.plot(dimmaxevals, maxevals, **styles[i])
                plt.setp(tmp[0], markersize=20,  # label=alg,
                         markeredgecolor=plt.getp(tmp[0], 'color'),
                         markeredgewidth=1,
                         markerfacecolor='None', linestyle='None')
            handles.append(tmp)
            # tmp2 = plt.plot(dimmedian, medianfes, ls='', marker='+',
            #                 markersize=30, markeredgewidth=5,
            #                 markeredgecolor=plt.getp(tmp, 'color'))[0]
            # for i, n in enumerate(nbsucc):
            #     plt.text(dimnbsucc[i], numpy.array(ynbsucc[i])*1.85, n,
            #              verticalalignment='bottom',
            #              horizontalalignment='center')

        if not bestalg.bestalgentries2009:
            bestalg.loadBBOB2009()

        bestalgdata = []
        dimbestalg = list(df[0] for df in bestalg.bestalgentries2009
                          if df[1] == f)
        dimbestalg.sort()
        dimbestalg2 = []
        for d in dimbestalg:
            entry = bestalg.bestalgentries2009[(d, f)]
            tmp = entry.detERT(target((f, d)))[0]
            if numpy.isfinite(tmp):
                bestalgdata.append(float(tmp) / d)
                dimbestalg2.append(d)

        tmp = plt.plot(dimbestalg2, bestalgdata, color=refcolor, linewidth=10,
                       marker='d', markersize=25, markeredgecolor=refcolor,
                       zorder=-1)  # label='best 2009',
        handles.append(tmp)

        if show_significance:  # plot significance-stars
            xstar, ystar = [], []
            dims = sorted(pproc.dictAlgByDim(dictFunc[f]))
            for i, dim in enumerate(dims):
                datasets = pproc.dictAlgByDim(dictFunc[f])[dim]
                assert all([len(datasets[ialg]) == 1 for ialg in sortedAlgs
                            if datasets[ialg]])
                dsetlist = [datasets[ialg][0] for ialg in sortedAlgs
                            if datasets[ialg]]
                if len(dsetlist) > 1:
                    arzp, arialg = toolsstats.significance_all_best_vs_other(
                        dsetlist, target((f, dim)))
                    if arzp[0][1] * len(dims) < show_significance:
                        ert = dsetlist[arialg[0]].detERT(target((f, dim)))[0]
                        if ert < numpy.inf:
                            xstar.append(dim)
                            ystar.append(ert / dim)
            plt.plot(xstar, ystar, 'k*', markerfacecolor=None,
                     markeredgewidth=2,
                     markersize=0.5 * styles[0]['markersize'])

        if funInfos:
            plt.gca().set_title(funInfos[f])

        isLegend = False
        if legend:
            plotLegend(handles)
        elif 1 < 3:
            if f in (1, 24, 101, 130) and len(sortedAlgs) < 6:
                # 6 elements at most in the boxed legend
                isLegend = True

        beautify(legend=isLegend, rightlegend=legend)

        plt.text(plt.xlim()[0], plt.ylim()[0],
                 'target ' + target.label_name() + ': ' + target.label(0))  # TODO: check

        saveFigure(filename, verbose=verbose)

        plt.close()

    # generate commands in tex file:
    try:
        abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        alg_definitions = []
        for i in range(len(sortedAlgs)):
            symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                                marker_to_latex(styles[i]['marker']))
            alg_definitions.append((', ' if i > 0 else '') + '%s:%s'
                                   % (symb, '\\algorithm' + abc[i % len(abc)]))
        toolsdivers.prepend_to_file(
            latex_commands_filename,
            [  # '\\providecommand{\\bbobppfigsftarget}{\\ensuremath{10^{%s}}}'
               # % target.loglabel(0),  # int(numpy.round(numpy.log10(target))),
             '\\providecommand{\\bbobppfigslegend}[1]{',
             scaling_figure_caption(target),
             'Legend: '] + alg_definitions + ['}'])
        toolsdivers.prepend_to_file(
            latex_commands_filename,
            ['\\providecommand{\\bbobECDFslegend}[1]{',
             ecdfs_figure_caption(target), '}'])

        if verbose:
            print 'Wrote commands and legend to %s' % filename

        # this is obsolete (however check templates)
        filename = os.path.join(outputdir, 'ppfigs.tex')
        f = open(filename, 'w')
        f.write('% Do not modify this file: calls to post-processing software'
                + ' will overwrite any modification.\n')
        f.write('Legend: ')

        for i in range(0, len(sortedAlgs)):
            symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                                marker_to_latex(styles[i]['marker']))
            f.write((', ' if i > 0 else '') + '%s:%s'
                    % (symb, writeLabels(sortedAlgs[i])))
        f.close()
        if verbose:
            print '(obsolete) Wrote legend in %s' % filename
    except IOError:
        raise

    handles.append(tmp)

    if funInfos:
        plt.gca().set_title(funInfos[f])

    beautify(rightlegend=legend)

    if legend:
        plotLegend(handles)
    else:
        if f in (1, 24, 101, 130):
            plt.legend()

    saveFigure(filename, figFormat=genericsettings.fig_formats,
               verbose=verbose)

    plt.close()
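# Illustrative sketch (not part of the original module) of the target
# normalization at the top of main() above: a scalar target is wrapped into
# a one-element pproc.TargetValues, which main() then indexes per
# (function, dimension) pair.
def _demo_cast_target(t=1e-8):
    target = pproc.TargetValues.cast([t] if numpy.isscalar(t) else t)
    assert isinstance(target, pproc.TargetValues) and len(target) == 1
    return target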
def main(argv=None):
    """Main routine for post-processing the data of multiple algorithms.

    Keyword arguments:
    argv -- list of strings containing options and arguments. If not
        provided, sys.argv is accessed.

    argv must list folders containing BBOB data files. Each of these
    folders should correspond to the data of ONE algorithm and should be
    listed in algorithmshortinfos.txt, a file from the bbob_pproc.compall
    package listing the information of various algorithms treated using
    bbob_pproc.dataoutput

    Furthermore, argv can begin with, in any order, the optional flags
    listed below.

        -h, --help

            displays this message.

        -v, --verbose

            verbose mode, prints out operations. When not in verbose
            mode, no output is to be expected, except for errors.

        -o, --output-dir OUTPUTDIR

            changes the default output directory
            ('defaultoutputdirectory') to OUTPUTDIR.

        --noise-free, --noisy

            restrict the post-processing to part of the data set only.
            This also quickens the post-processing, since only part of
            the pickle files is loaded.

        --tab-only, --perfprof-only

            these options can be used to output respectively the
            comparison tex tables or the performance profiles only. A
            combination of any two of these options results in no
            output.

    Exceptions raised:
    Usage -- Gives back a usage message.

    Examples:

    * Calling the runcompall.py interface from the command line:

        $ python bbob_pproc/runcompall.py -v

    * Loading this package and calling the main from the command line
      (requires that the path to this package is in the python search
      path):

        $ python -m bbob_pproc.runcompall -h

      This will print out this help message.

    * From the python interactive shell (requires that the path to this
      package is in the python search path):

        >>> from bbob_pproc import runcompall
        >>> runcompall.main('-o outputfolder folder1 folder2'.split())

      This will execute the post-processing on the data found in folder1
      and folder2. The -o option changes the output folder from the
      default cmpalldata to outputfolder.

    * Generate post-processing data for some algorithms:

        $ python runcompall.py AMALGAM BFGS CMA-ES

    """
    if argv is None:
        argv = sys.argv[1:]
    try:
        try:
            opts, args = getopt.getopt(argv, "hvo:",
                                       ["help", "output-dir=", "noisy",
                                        "noise-free", "perfprof-only",
                                        "tab-only", "verbose"])
        except getopt.error, msg:
            raise Usage(msg)

        if not (args):
            usage()
            sys.exit()

        verbose = False
        outputdir = 'cmpalldata'

        isNoisy = False
        isNoiseFree = False

        isPer = True
        isTab = True

        # Process options
        for o, a in opts:
            if o in ("-v", "--verbose"):
                verbose = True
            elif o in ("-h", "--help"):
                usage()
                sys.exit()
            elif o in ("-o", "--output-dir"):
                outputdir = a
            elif o == "--noisy":
                isNoisy = True
            elif o == "--noise-free":
                isNoiseFree = True
            elif o == "--tab-only":
                isPer = False
                isEff = False
            elif o == "--perfprof-only":
                isEff = False
                isTab = False
            else:
                assert False, "unhandled option"

        if (not verbose):
            warnings.simplefilter('ignore')

        print ("BBOB Post-processing: will generate comparison " +
               "data in folder %s" % outputdir)
        print " this might take several minutes."

        dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)

        if not dsList:
            sys.exit()

        for i in dictAlg:
            if isNoisy and not isNoiseFree:
                dictAlg[i] = dictAlg[i].dictByNoise().get('nzall',
                                                          DataSetList())
            elif isNoiseFree and not isNoisy:
                dictAlg[i] = dictAlg[i].dictByNoise().get('noiselessall',
                                                          DataSetList())
            tmp = set((j.algId, j.comment) for j in dictAlg[i])
            for j in tmp:
                if not dataoutput.isListed(j):
                    dataoutput.updateAlgorithmInfo(j, verbose=verbose)

        for i in dsList:
            if not i.dim in (2, 3, 5, 10, 20):
                continue

            # Deterministic algorithms
            if i.algId in ('Original DIRECT', ):
                tmpInstancesOfInterest = instancesOfInterestDet
            else:
                tmpInstancesOfInterest = instancesOfInterest

            # Python-2 dict comparison against both sets of expected
            # instance counts
            if ((dict((j, i.itrials.count(j)) for j in set(i.itrials)) <
                 tmpInstancesOfInterest) and
                    (dict((j, i.itrials.count(j)) for j in set(i.itrials)) <
                     instancesOfInterest2010)):
                warnings.warn('The data of %s do not list ' % (i) +
                              'the correct instances ' +
                              'of function F%d or the ' % (i.funcId) +
                              'correct number of trials for each.')

        # group targets:
        dictTarget = {}
        for t in sorted(set(single_target_function_values +
                            summarized_target_function_values)):
            tmpdict = dict.fromkeys(((f, d) for f in range(0, 25) + range(101, 131)
                                     for d in (2, 3, 5, 10, 20, 40)), t)
            stmp = 'E'
            if t == 1:
                stmp = 'E-'
            # dictTarget['_f' + stmp + '%2.1f' % numpy.log10(t)] = (tmpdict, )
            if t in single_target_function_values:
                dictTarget['_f' + stmp + '%02d' % numpy.log10(t)] = (tmpdict, )
            if t in summarized_target_function_values:
                dictTarget.setdefault('_allfs', []).append(tmpdict)

        if not os.path.exists(outputdir):
            os.mkdir(outputdir)
            if verbose:
                print 'Folder %s was created.' % (outputdir)

        # Performance profiles
        if isPer:
            dictNoi = pproc.dictAlgByNoi(dictAlg)
            for ng, tmpdictAlg in dictNoi.iteritems():
                dictDim = pproc.dictAlgByDim(tmpdictAlg)
                for d, entries in dictDim.iteritems():
                    for k, t in dictTarget.iteritems():
                        # set_trace()
                        ppperfprof.main(entries, target=t, order=sortedAlgs,
                                        plotArgs=algPlotInfos,
                                        outputdir=outputdir,
                                        info=('%02d%s_%s' % (d, k, ng)),
                                        verbose=verbose)
            organizeRTDpictures.do(outputdir)
            print "ECDFs of ERT figures done."

        if isTab:
            allmintarget, allertbest = detTarget(dsList)
            pptables.tablemanyalgonefunc(dictAlg, allmintarget, allertbest,
                                         sortedAlgs, outputdir)
            print "Comparison tables done."

    except Usage, err:
        print >> sys.stderr, err.msg
        print >> sys.stderr, "for help use -h or --help"
        return 2
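# Illustrative sketch (not part of the original module) of the noise
# filtering applied in main() above: dictByNoise() partitions a DataSetList
# by noise group, and .get() with an empty default keeps processing robust
# when a group is absent.
def _demo_noise_filter(dictAlg, keep='nzall'):
    for name in dictAlg:
        dictAlg[name] = dictAlg[name].dictByNoise().get(keep, DataSetList())
    return dictAlg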
def main(dictAlg, sortedAlgs, target=1e-8, outputdir='ppdata', verbose=True):
    """From a DataSetList, returns figures showing the scaling: ERT/dim vs dim.

    One function and one target per figure.

    ``sortedAlgs`` is a list of string identifiers (folder names).

    """
    dictFunc = pproc.dictAlgByFun(dictAlg)

    for f in dictFunc:
        filename = os.path.join(outputdir, 'ppfigs_f%03d' % (f))
        handles = []
        fix_styles(len(sortedAlgs))
        for i, alg in enumerate(sortedAlgs):
            dictDim = dictFunc[f][alg].dictByDim()

            # Collect data
            dimert = []
            ert = []
            dimnbsucc = []
            ynbsucc = []
            nbsucc = []
            dimmaxevals = []
            maxevals = []
            dimmedian = []
            medianfes = []
            for dim in sorted(dictDim):
                assert len(dictDim[dim]) == 1
                entry = dictDim[dim][0]
                data = generateData(entry, target)  # TODO: here we might want a different target for each function
                if 1 < 3 or data[2] == 0:  # No success
                    dimmaxevals.append(dim)
                    maxevals.append(float(data[3]) / dim)
                if data[2] > 0:
                    dimmedian.append(dim)
                    medianfes.append(data[4] / dim)
                    dimert.append(dim)
                    ert.append(float(data[0]) / dim)
                    if data[1] < 1.:
                        dimnbsucc.append(dim)
                        ynbsucc.append(float(data[0]) / dim)
                        nbsucc.append('%d' % data[2])

            # Draw lines
            tmp = plt.plot(dimert, ert, **styles[i])  # label=alg, )
            plt.setp(tmp[0], markeredgecolor=plt.getp(tmp[0], 'color'))
            # For legend
            # tmp = plt.plot([], [], label=alg.replace('..' + os.sep, '').strip(os.sep), **styles[i])
            tmp = plt.plot([], [], label=alg.split(os.sep)[-1], **styles[i])
            plt.setp(tmp[0], markersize=12.,
                     markeredgecolor=plt.getp(tmp[0], 'color'))
            if dimmaxevals:
                tmp = plt.plot(dimmaxevals, maxevals, **styles[i])
                plt.setp(tmp[0], markersize=20,  # label=alg,
                         markeredgecolor=plt.getp(tmp[0], 'color'),
                         markeredgewidth=1,
                         markerfacecolor='None', linestyle='None')
            handles.append(tmp)
            # tmp2 = plt.plot(dimmedian, medianfes, ls='', marker='+',
            #                 markersize=30, markeredgewidth=5,
            #                 markeredgecolor=plt.getp(tmp, 'color'))[0]
            # for i, n in enumerate(nbsucc):
            #     plt.text(dimnbsucc[i], numpy.array(ynbsucc[i])*1.85, n,
            #              verticalalignment='bottom',
            #              horizontalalignment='center')

        if not bestalg.bestalgentries2009:
            bestalg.loadBBOB2009()

        bestalgdata = []
        dimbestalg = list(df[0] for df in bestalg.bestalgentries2009
                          if df[1] == f)
        dimbestalg.sort()
        dimbestalg2 = []
        for d in dimbestalg:
            entry = bestalg.bestalgentries2009[(d, f)]
            tmp = entry.detERT([target])[0]
            if numpy.isfinite(tmp):
                bestalgdata.append(float(tmp) / d)
                dimbestalg2.append(d)

        tmp = plt.plot(dimbestalg2, bestalgdata, color=refcolor, linewidth=10,
                       marker='d', markersize=25, markeredgecolor=refcolor,
                       zorder=-1)  # label='best 2009',
        handles.append(tmp)

        if show_significance:  # plot significance-stars
            xstar, ystar = [], []
            dims = sorted(pproc.dictAlgByDim(dictFunc[f]))
            for i, dim in enumerate(dims):
                datasets = pproc.dictAlgByDim(dictFunc[f])[dim]
                assert all([len(datasets[ialg]) == 1 for ialg in sortedAlgs
                            if datasets[ialg]])
                dsetlist = [datasets[ialg][0] for ialg in sortedAlgs
                            if datasets[ialg]]
                if len(dsetlist) > 1:
                    arzp, arialg = toolsstats.significance_all_best_vs_other(
                        dsetlist, [target])
                    if arzp[0][1] * len(dims) < 0.05:
                        ert = dsetlist[arialg[0]].detERT([target])[0]
                        if ert < numpy.inf:
                            xstar.append(dim)
                            ystar.append(ert / dim)
            plt.plot(xstar, ystar, 'k*', markerfacecolor=None,
                     markeredgewidth=2,
                     markersize=0.5 * styles[0]['markersize'])

        if funInfos:
            plt.gca().set_title(funInfos[f])

        isLegend = False
        if legend:
            plotLegend(handles)
        elif 1 < 3:
            if f in (1, 24, 101, 130) and len(sortedAlgs) < 6:
                # 6 elements at most in the boxed legend
                isLegend = True

        beautify(legend=isLegend, rightlegend=legend)

        plt.text(plt.xlim()[0], plt.ylim()[0], 'ftarget=%.0e' % target)

        saveFigure(filename, verbose=verbose)

        plt.close()

    # generate commands in tex file:
    try:
        abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        alg_definitions = []
        for i in range(len(sortedAlgs)):
            symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                                marker_to_latex(styles[i]['marker']))
            alg_definitions.append((', ' if i > 0 else '') + '%s:%s'
                                   % (symb, '\\algorithm' + abc[i % len(abc)]))
        filename = os.path.join(outputdir, 'bbob_pproc_commands.tex')
        toolsdivers.prepend_to_file(
            filename,
            ['\\providecommand{\\bbobppfigsftarget}{\\ensuremath{10^{%d}}}'
             % int(numpy.round(numpy.log10(target))),
             '\\providecommand{\\bbobppfigslegend}[1]{',
             scaling_figure_legend,
             'Legend: '] + alg_definitions + ['}'])
        if verbose:
            print 'Wrote commands and legend to %s' % filename

        # this is obsolete (however check templates)
        filename = os.path.join(outputdir, 'ppfigs.tex')
        f = open(filename, 'w')
        f.write('% Do not modify this file: calls to post-processing software'
                + ' will overwrite any modification.\n')
        f.write('Legend: ')

        for i in range(0, len(sortedAlgs)):
            symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                                marker_to_latex(styles[i]['marker']))
            f.write((', ' if i > 0 else '') + '%s:%s'
                    % (symb, writeLabels(sortedAlgs[i])))
        f.close()
        if verbose:
            print '(obsolete) Wrote legend in %s' % filename
    except IOError:
        raise

    handles.append(tmp)

    if funInfos:
        plt.gca().set_title(funInfos[f])

    beautify(rightlegend=legend)

    if legend:
        plotLegend(handles)
    else:
        if f in (1, 24, 101, 130):
            plt.legend()

    saveFigure(filename, figFormat=genericsettings.fig_formats,
               verbose=verbose)

    plt.close()
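# Illustrative sketch (not part of the original module): the obsolete legend
# written above concatenates one colored-symbol/label pair per algorithm;
# color_to_latex, marker_to_latex, styles and writeLabels are taken from the
# module code above.
def _demo_legend_line(sortedAlgs):
    parts = []
    for i in range(len(sortedAlgs)):
        symb = r'{%s%s}' % (color_to_latex(styles[i]['color']),
                            marker_to_latex(styles[i]['marker']))
        parts.append('%s:%s' % (symb, writeLabels(sortedAlgs[i])))
    return 'Legend: ' + ', '.join(parts)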