def main(dictAlg, outputdir='.', verbose=True):
    """Main routine for generating convergence plots."""
    global warned  # bind variable warned into this scope
    dictFun = pproc.dictAlgByFun(dictAlg)
    for l in dictFun:  # l is the function id
        for i in dictFun[l]:  # i is the algorithm key
            plt.figure()
            if 1 < 3:  # no algorithm name in filename, as everywhere else
                figurename = "ppconv_" + "f%03d" % l
            else:  # previous version with algorithm name, but this is not very practical later
                if type(i) in (list, tuple):
                    figurename = "ppconv_plot_" + i[0] + "_f" + str(l)
                else:
                    try:
                        figurename = "ppconv_plot_" + dictFun[l][i].algId + "_f" + str(l)
                    except AttributeError:
                        # (rather desperate) bug-fix attempt that works for the unit test
                        figurename = "ppconv_plot_" + dictFun[l][i][0].algId + "_f" + str(l)
            plt.xlabel('number of function evaluations / dimension')
            plt.ylabel('Median of fitness')
            plt.grid()
            ax = plt.gca()
            ax.set_yscale("log")
            ax.set_xscale("log")
            for j in dictFun[l][i]:  # j is a DataSet
                dimList_b = []
                dimList_f = []
                dimList_b.append(j.funvals[:, 0])
                dimList_f.append(j.funvals[:, 1:])
                bs, fs = rearrange(dimList_b, dimList_f)
                labeltext = str(j.dim) + "D"
                try:
                    if 11 < 3:
                        plt.errorbar(bs[0] / j.dim, fs[0][0],
                                     yerr=[fs[0][1], fs[0][2]], label=labeltext)
                    else:
                        plt.errorbar(bs[0] / j.dim, fs[0][0], label=labeltext)
                except FloatingPointError:  # that's a bit of a hack
                    if 1 < 3 or not warned:
                        print('Warning: floating point error when plotting errorbars, ignored')
                    warned = True
            beautify()
            saveFigure(os.path.join(outputdir, figurename.replace(' ', '')),
                       genericsettings.getFigFormats(), verbose=verbose)
            plt.close()
        try:
            algname = str(dictFun[l].keys()[0][0])
        except KeyError:
            algname = str(dictFun[l].keys()[0])
        save_single_functions_html(os.path.join(outputdir, 'ppconv'), algname)  # first try
    print("Convergence plots done.")
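
# Hedged usage sketch (not part of the original module): build ``dictAlg`` from
# already post-processed data folders and call ``main`` above. The folder names
# are placeholders, and the return order of ``pproc.processInputArgs`` is an
# assumption here, not something this file guarantees.
def _example_convergence_plots(data_folders=('ALG1_data', 'ALG2_data'),
                               outputdir='ppdata'):
    """Write one ppconv_fXXX convergence figure per function into `outputdir`."""
    _dsList, _sortedAlgs, dictAlg = pproc.processInputArgs(list(data_folders))
    main(dictAlg, outputdir=outputdir, verbose=True)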
def main(dictAlg, order=None, outputdir='.', info='default',
         dimension=None, verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances
                         one instance is equivalent to one algorithm,
    :param list targets: target function values
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config
    global divide_by_dimension  # not fully implemented/tested yet
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1 and dimension is None:
        raise ValueError('We never integrate over dimension.')
    if dimension is not None:
        if dimension not in tmp.keys():
            raise ValueError('dimension %d not in dictAlg dimensions %s'
                             % (dimension, str(tmp.keys())))
        tmp = {dimension: tmp[dimension]}
    dim = tmp.keys()[0]

    divisor = dim if divide_by_dimension else 1

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0:
            print 'Crafting effort for', alg, 'is', CrE

    dictData = {}  # list of (ert per function) per algorithm
    dictMaxEvals = {}  # list of (maxevals per function) per algorithm
    bestERT = []  # best ert per function
    # funcsolved = [set()] * len(targets)  # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []

    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))
        for j, t in enumerate(target_values((f, dim))):
            # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)
            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0]  # one element per fun and per dim.
                    evals = entry.detEvals([t])[0]
                    assert entry.dim == dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                              percentiles=[50],
                                              samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    # set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                               % (alg, f, dim)
                               + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        if displaybest2009:
            # set_trace()
            if not bestalg.bestalgentries2009:
                bestalg.loadBBOB2009()
            bestalgentry = bestalg.bestalgentries2009[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    # set_trace()
                    assert dim == bestalgentry.dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = bestalgentry.maxevals[
                        bestalgevals[1][j]][np.isnan(evals)] / divisor
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                          percentiles=[50],
                                          samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)

    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    def algname_to_label(algname, dirname=None):
        """to be extended to become generally useful"""
        if isinstance(algname, (tuple, list)):  # not sure this is needed
            return ' '.join([str(name) for name in algname])
        return str(algname)

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[i % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = algname_to_label(alg)
        # args['markevery'] = perfprofsamplesize  # option available in latest version of matplotlib
        # elif len(show_algorithms) > 0:
        #     args['color'] = 'wheat'
        #     args['ls'] = '-'
        #     args['zorder'] = -1
        # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... which does the work
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)

    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}  # latex commands
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' %
                        (tmp, toolsdivers.str_to_latex(
                            toolsdivers.strip_pathname2(algname_to_label(alg)))))
                algtocommand[algname_to_label(alg)] = tmp
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            commandnames = []
            for label in labels:
                commandnames.append(algtocommand[label])
            # f.write(headleg)
            if len(order) > 28:
                # latex sidepanel won't work well for more than 25 algorithms,
                # but original labels are also clipped
                f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}'
                        % (commandnames[0], commandnames[-1]))
            else:
                fontsize_command = r'\tiny{}' if len(order) > 19 else ''
                f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}'
                        % (fontsize_command, commandnames[0]))  # TODO: check len(labels) > 0
                for i in range(1, len(labels)):
                    f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
                f.write('}}\n')
            # f.write(footleg)
            if verbose:
                print 'Wrote right-hand legend in %s' % fileName

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    # beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys())))
    text += ',%d-D' % dim  # TODO: this is strange when different dimensions are plotted
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)
    if len(dictFunc) == 1:
        plt.title(' '.join((str(dictFunc.keys()[0]),
                            genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))
    a = plt.gca()

    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    if save_figure:
        ppfig.saveFigure(figureName, verbose=verbose)
        if len(dictFunc) == 1:
            ppfig.save_single_functions_html(
                os.path.join(outputdir, 'pprldmany'),
                '',  # algorithms names are clearly visible in the figure
                add_to_names='_%02dD' % (dim),
                algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED)
    if close_figure:
        plt.close()
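
# Hedged usage sketch (not part of the original module): because main() above
# refuses to integrate over dimension, one figure is generated per dimension
# found in ``dictAlg``. The folder names are placeholders, and the return order
# of ``pp.processInputArgs`` is an assumption, not guaranteed by this file.
def _example_runlength_ecdfs(data_folders=('ALG1_data', 'ALG2_data'),
                             outputdir='ppdata'):
    """Draw one pprldmany figure per dimension contained in the data."""
    _dsList, sortedAlgs, dictAlg = pp.processInputArgs(list(data_folders))
    for example_dim in sorted(pp.dictAlgByDim(dictAlg).keys()):
        main(dictAlg, order=sortedAlgs, outputdir=outputdir,
             info='%02dD' % example_dim, dimension=example_dim, verbose=True)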
def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default',
         dimension=None, verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances
                         one instance is equivalent to one algorithm,
    :param list targets: target function values
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config
    global divide_by_dimension  # not fully implemented/tested yet
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1 and dimension is None:
        raise ValueError('We never integrate over dimension.')
    if dimension is not None:
        if dimension not in tmp.keys():
            raise ValueError('dimension %d not in dictAlg dimensions %s'
                             % (dimension, str(tmp.keys())))
        tmp = {dimension: tmp[dimension]}
    dim = tmp.keys()[0]

    divisor = dim if divide_by_dimension else 1

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0:
            print 'Crafting effort for', alg, 'is', CrE

    dictData = {}  # list of (ert per function) per algorithm
    dictMaxEvals = {}  # list of (maxevals per function) per algorithm
    bestERT = []  # best ert per function
    # funcsolved = [set()] * len(targets)  # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []

    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))
        for j, t in enumerate(target_values((f, dim))):
            # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)
            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0]  # one element per fun and per dim.
                    evals = entry.detEvals([t])[0]
                    assert entry.dim == dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                              percentiles=[50],
                                              samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    # set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                               % (alg, f, dim)
                               + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        displaybest2009 = not isBiobjective  # disabled until we find the bug
        if displaybest2009:
            # set_trace()
            bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)
            bestalgentry = bestalgentries[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    # set_trace()
                    assert dim == bestalgentry.dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = bestalgentry.maxevals[
                        bestalgevals[1][j]][np.isnan(evals)] / divisor
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                          percentiles=[50],
                                          samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)

    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    def algname_to_label(algname, dirname=None):
        """to be extended to become generally useful"""
        if isinstance(algname, (tuple, list)):  # not sure this is needed
            return ' '.join([str(name) for name in algname])
        return str(algname)

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[i % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = algname_to_label(alg)
        # args['markevery'] = perfprofsamplesize  # option available in latest version of matplotlib
        # elif len(show_algorithms) > 0:
        #     args['color'] = 'wheat'
        #     args['ls'] = '-'
        #     args['zorder'] = -1
        # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... which does the work
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)

    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}  # latex commands
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' %
                        (tmp, toolsdivers.str_to_latex(
                            toolsdivers.strip_pathname2(algname_to_label(alg)))))
                algtocommand[algname_to_label(alg)] = tmp
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            commandnames = []
            for label in labels:
                commandnames.append(algtocommand[label])
            # f.write(headleg)
            if len(order) > 28:
                # latex sidepanel won't work well for more than 25 algorithms,
                # but original labels are also clipped
                f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}'
                        % (commandnames[0], commandnames[-1]))
            else:
                fontsize_command = r'\tiny{}' if len(order) > 19 else ''
                f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}'
                        % (fontsize_command, commandnames[0]))  # TODO: check len(labels) > 0
                for i in range(1, len(labels)):
                    f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
                f.write('}}\n')
            # f.write(footleg)
            if verbose:
                print 'Wrote right-hand legend in %s' % fileName

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    # beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f')
    text += ',%d-D' % dim  # TODO: this is strange when different dimensions are plotted
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)
    if len(dictFunc) == 1:
        plt.title(' '.join((str(dictFunc.keys()[0]),
                            genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))
    a = plt.gca()

    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    if save_figure:
        ppfig.saveFigure(figureName, verbose=verbose)
        if len(dictFunc) == 1:
            ppfig.save_single_functions_html(
                os.path.join(outputdir, 'pprldmany'),
                '',  # algorithms names are clearly visible in the figure
                add_to_names='_%02dD' % (dim),
                algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED)
    if close_figure:
        plt.close()
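
# Illustration of the bootstrap step used in both main() variants above: for one
# algorithm, function and target, the evaluation counts of successful runs and
# the budgets of unsuccessful runs are combined into simulated restart run
# lengths via toolsstats.drawSP, using the same call and indexing as in the
# loops above. The toy numbers are made up for illustration only.
def _example_simulated_runlengths():
    """Return `perfprofsamplesize` simulated restart run lengths for toy data."""
    runlengthsucc = np.array([120., 150., 200.])  # evaluations of successful runs
    runlengthunsucc = np.array([1000., 1000.])    # maxevals of unsuccessful runs
    return toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                             percentiles=[50],
                             samplesize=perfprofsamplesize)[1]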
# fragment: ``i`` below is a single DataSet taken from an enclosing loop over all data sets
if (dict((j, i.instancenumbers.count(j)) for j in set(i.instancenumbers)) <
        inset.instancesOfInterest):
    warnings.warn('The data of %s do not list ' % (i) +
                  'the correct instances ' +
                  'of function F%d.' % (i.funcId))

plt.rc("axes", **inset.rcaxes)
plt.rc("xtick", **inset.rctick)
plt.rc("ytick", **inset.rctick)
plt.rc("font", **inset.rcfont)
plt.rc("legend", **inset.rclegend)
plt.rc('pdf', fonttype=42)

ppfig.save_single_functions_html(
    os.path.join(outputdir, genericsettings.many_algorithm_file_name),
    '',  # algorithms names are clearly visible in the figure
    algorithmCount=ppfig.AlgorithmCount.MANY)

ppfig.copy_js_files(outputdir)

# convergence plots
if genericsettings.isConv:
    ppconverrorbars.main(dictAlg, outputdir, genericsettings.verbose)

# empirical cumulative distribution functions (ECDFs) aka Data profiles
if genericsettings.isRLDistr:
    config.config()

    # ECDFs per noise groups
    dictNoi = pproc.dictAlgByNoi(dictAlg)
    for ng, tmpdictAlg in dictNoi.iteritems():
        dictDim = pproc.dictAlgByDim(tmpdictAlg)
        for d, entries in dictDim.iteritems():