def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, function_targets_line=True):  # [1, 13, 101]
    """Generate one table per func with results of multiple algorithms.

    Differences with the first version:

    * numbers aligned using the decimal separator
    * premises for dispersion measure
    * significance test against best algorithm
    * table width...

    Takes ``targetsOfInterest`` from this file as "input argument" to compute
    the desired target values. ``targetsOfInterest`` might be configured via
    config.

    """
    # TODO: method is long, terrible to read, split if possible

    bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)

    # Sort data per dimension and function
    dictData = {}
    dsListperAlg = list(dictAlg[i] for i in sortedAlgs)
    for n, entries in enumerate(dsListperAlg):
        tmpdictdim = entries.dictByDim()
        for d in tmpdictdim:
            tmpdictfun = tmpdictdim[d].dictByFunc()
            for f in tmpdictfun:
                dictData.setdefault((d, f), {})[n] = tmpdictfun[f]

    nbtests = len(dictData)

    funInfos = ppfigparam.read_fun_infos(isBiobjective)

    for df in dictData:
        # Generate one table per df
        # first update targets for each dimension-function pair if needed:
        targets = targetsOfInterest((df[1], df[0]))
        targetf = targets[-1]

        # best 2009
        refalgentry = bestalgentries[df]
        refalgert = refalgentry.detERT(targets)
        refalgevals = refalgentry.detEvals((targetf, ))[0][0]
        refalgnbruns = len(refalgevals)
        refalgnbsucc = numpy.sum(numpy.isnan(refalgevals) == False)

        # Process the data
        # The following variables will be lists of elements, each corresponding
        # to an algorithm
        algnames = []
        #algdata = []
        algerts = []
        algevals = []
        algdisp = []
        algnbsucc = []
        algnbruns = []
        algmedmaxevals = []
        algmedfinalfunvals = []
        algtestres = []
        algentries = []

        for n in sorted(dictData[df].keys()):
            entries = dictData[df][n]
            # the number of datasets for a given dimension and function (df)
            # should be strictly 1. TODO: find a way to warn
            # TODO: do this checking before... why wasn't it triggered by ppperprof?
            if len(entries) > 1:
                print entries
                txt = ("There is more than a single entry associated with "
                       "folder %s on %d-D f%d." % (sortedAlgs[n], df[0], df[1]))
                raise Exception(txt)

            entry = entries[0]
            algentries.append(entry)
            algnames.append(sortedAlgs[n])

            evals = entry.detEvals(targets)
            #tmpdata = []
            tmpdisp = []
            tmpert = []
            for i, e in enumerate(evals):
                succ = (numpy.isnan(e) == False)
                ec = e.copy()  # note: here was the previous bug (changes made in e also appeared in evals!)
                ec[succ == False] = entry.maxevals[succ == False]
                ert = toolsstats.sp(ec, issuccessful=succ)[0]
                #tmpdata.append(ert/refalgert[i])
                if succ.any():
                    tmp = toolsstats.drawSP(ec[succ], entry.maxevals[succ == False],
                                            [10, 50, 90], samplesize=samplesize)[0]
                    tmpdisp.append((tmp[-1] - tmp[0]) / 2.)
                else:
                    tmpdisp.append(numpy.nan)
                tmpert.append(ert)
            algerts.append(tmpert)
            algevals.append(evals)
            #algdata.append(tmpdata)
            algdisp.append(tmpdisp)
            algmedmaxevals.append(numpy.median(entry.maxevals))
            algmedfinalfunvals.append(numpy.median(entry.finalfunvals))
            #algmedmaxevals.append(numpy.median(entry.maxevals)/df[0])
            #algmedfinalfunvals.append(numpy.median(entry.finalfunvals))

            algtestres.append(significancetest(refalgentry, entry, targets))

            # determine success probability for Df = 1e-8
            e = entry.detEvals((targetf, ))[0]
            algnbsucc.append(numpy.sum(numpy.isnan(e) == False))
            algnbruns.append(len(e))

        # Process over all data
        # find best values...
        nalgs = len(dictData[df])
        maxRank = 1 + numpy.floor(0.14 * nalgs)  # number of algs to be displayed in bold

        isBoldArray = []  # Point out the best values
        algfinaldata = []  # Store median function values/median number of function evaluations
        tmptop = getTopIndicesOfColumns(algerts, maxRank=maxRank)
        for i, erts in enumerate(algerts):
            tmp = []
            for j, ert in enumerate(erts):  # algi targetj
                tmp.append(i in tmptop[j]
                           or (nalgs > 7 and algerts[i][j] <= 3. * refalgert[j]))
            isBoldArray.append(tmp)
            algfinaldata.append((algmedfinalfunvals[i], algmedmaxevals[i]))

        # significance test of best given algorithm against all others
        best_alg_idx = numpy.array(algerts).argsort(0)[0, :]  # indexed by target index
        significance_versus_others = significance_all_best_vs_other(algentries, targets, best_alg_idx)[0]

        # Create the table
        table = []
        tableHtml = []
        spec = r'@{}c@{}|*{%d}{@{\,}r@{}X@{\,}}|@{}r@{}@{}l@{}' % (len(targets))  # in case StrLeft not working: replaced c@{} with l@{ }
        spec = r'@{}c@{}|*{%d}{@{}r@{}X@{}}|@{}r@{}@{}l@{}' % (len(targets))  # in case StrLeft not working: replaced c@{} with l@{ }
        extraeol = []

        # Generate header lines
        if with_table_heading:
            header = funInfos[df[1]] if df[1] in funInfos.keys() else 'f%d' % df[1]
            table.append([r'\multicolumn{%d}{@{\,}c@{\,}}{{\textbf{%s}}}'
                          % (2 * len(targets) + 2, header)])
            extraeol.append('')

        if function_targets_line is True or (function_targets_line and df[1] in function_targets_line):
            if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
                curline = [r'\#FEs/D']
                curlineHtml = ['<thead>\n<tr>\n<th>#FEs/D<br>REPLACEH</th>\n']
                counter = 1
                for i in targetsOfInterest.labels():
                    curline.append(r'\multicolumn{2}{@{}c@{}}{%s}' % i)
                    curlineHtml.append('<td>%s<br>REPLACE%d</td>\n' % (i, counter))
                    counter += 1
            else:
                curline = [r'$\Delta f_\mathrm{opt}$']
                curlineHtml = ['<thead>\n<tr>\n<th>Δ f<sub>opt</sub><br>REPLACEH</th>\n']
                counter = 1
                for t in targets:
                    curline.append(r'\multicolumn{2}{@{\,}X@{\,}}{%s}'
                                   % writeFEvals2(t, precision=1, isscientific=True))
                    curlineHtml.append('<td>%s<br>REPLACE%d</td>\n'
                                       % (writeFEvals2(t, precision=1, isscientific=True), counter))
                    counter += 1
            #    curline.append(r'\multicolumn{2}{@{\,}X@{}|}{%s}'
            #                   % writeFEvals2(targets[-1], precision=1, isscientific=True))
            curline.append(r'\multicolumn{2}{@{}l@{}}{\#succ}')
            curlineHtml.append('<td>#succ<br>REPLACEF</td>\n</tr>\n</thead>\n')
            table.append(curline)
            extraeol.append(r'\hline')
            # extraeol.append(r'\hline\arrayrulecolor{tableShade}')

        curline = [r'ERT$_{\text{best}}$'] if with_table_heading else [r'\textbf{f%d}' % df[1]]
        replaceValue = 'ERT<sub>best</sub>' if with_table_heading else ('<b>f%d</b>' % df[1])
        curlineHtml = [item.replace('REPLACEH', replaceValue) for item in curlineHtml]

        if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
            # write ftarget:fevals
            counter = 1
            for i in xrange(len(refalgert[:-1])):
                temp = "%.1e" % targetsOfInterest((df[1], df[0]))[i]
                if temp[-2] == "0":
                    temp = temp[:-2] + temp[-1]
                curline.append(r'\multicolumn{2}{@{}c@{}}{\textit{%s}:%s \quad}'
                               % (temp, writeFEvalsMaxPrec(refalgert[i], 2)))
                replaceValue = '<i>%s</i>:%s' % (temp, writeFEvalsMaxPrec(refalgert[i], 2))
                curlineHtml = [item.replace('REPLACE%d' % counter, replaceValue) for item in curlineHtml]
                counter += 1

            temp = "%.1e" % targetsOfInterest((df[1], df[0]))[-1]
            if temp[-2] == "0":
                temp = temp[:-2] + temp[-1]
            curline.append(r'\multicolumn{2}{@{}c@{}|}{\textit{%s}:%s }'
                           % (temp, writeFEvalsMaxPrec(refalgert[-1], 2)))
            replaceValue = '<i>%s</i>:%s' % (temp, writeFEvalsMaxPrec(refalgert[-1], 2))
            curlineHtml = [item.replace('REPLACE%d' % counter, replaceValue) for item in curlineHtml]
        else:
            # write #fevals of the reference alg
            counter = 1
            for i in refalgert[:-1]:
                curline.append(r'\multicolumn{2}{@{}c@{}}{%s \quad}'
                               % writeFEvalsMaxPrec(i, 2))
                curlineHtml = [item.replace('REPLACE%d' % counter, writeFEvalsMaxPrec(i, 2)) for item in curlineHtml]
                counter += 1
            curline.append(r'\multicolumn{2}{@{}c@{}|}{%s}'
                           % writeFEvalsMaxPrec(refalgert[-1], 2))
            curlineHtml = [item.replace('REPLACE%d' % counter, writeFEvalsMaxPrec(refalgert[-1], 2)) for item in curlineHtml]

        # write the success ratio for the reference alg
        tmp2 = numpy.sum(numpy.isnan(refalgevals) == False)  # count the nb of successes
        curline.append('%d' % (tmp2))
        if tmp2 > 0:
            curline.append('/%d' % len(refalgevals))
            replaceValue = '%d/%d' % (tmp2, len(refalgevals))
        else:
            replaceValue = '%d' % tmp2
        curlineHtml = [item.replace('REPLACEF', replaceValue) for item in curlineHtml]

        table.append(curline[:])
        tableHtml.extend(curlineHtml[:])
        tableHtml.append('<tbody>\n')
        extraeol.append('')

        #for i, gna in enumerate(zip((1, 2, 3), ('bla', 'blo', 'bli'))):
        #    print i, gna, gno
        #    set_trace()

        # Format data
        #if df == (5, 17):
        #    set_trace()

        header = r'\providecommand{\ntables}{7}'
        for i, alg in enumerate(algnames):
            tableHtml.append('<tr>\n')
            #algname, entries, irs, line, line2, succ, runs, testres1alg in zip(algnames,
            #data, dispersion, isBoldArray, isItalArray, nbsucc, nbruns, testres):
            commandname = r'\alg%stables' % numtotext(i)
            # header += r'\providecommand{%s}{{%s}{}}' % (commandname, str_to_latex(strip_pathname(alg)))
            header += r'\providecommand{%s}{\StrLeft{%s}{\ntables}}' % (commandname, str_to_latex(strip_pathname1(alg)))
            curline = [commandname + r'\hspace*{\fill}']  # each list element becomes a &-separated table entry?
            curlineHtml = ['<th>%s</th>\n' % str_to_latex(strip_pathname1(alg))]

            for j, tmp in enumerate(zip(algerts[i], algdisp[i],  # j is target index
                                        isBoldArray[i], algtestres[i])):
                ert, dispersion, isBold, testres = tmp

                alignment = '@{\,}X@{\,}'
                if j == len(algerts[i]) - 1:
                    alignment = '@{\,}X@{\,}|'

                data = ert / refalgert[j]
                # write star for significance against all other algorithms
                str_significance_subsup = ''
                str_significance_subsup_html = ''
                if (len(best_alg_idx) > 0 and len(significance_versus_others) > 0
                        and i == best_alg_idx[j]
                        and nbtests * significance_versus_others[j][1] < 0.05):
                    logp = -numpy.ceil(numpy.log10(nbtests * significance_versus_others[j][1]))
                    logp = numpy.min((9, logp))  # not messing up the format and handling inf
                    str_significance_subsup = r"^{%s%s}" % (
                        significance_vs_others_symbol, str(int(logp)) if logp > 1 else '')
                    str_significance_subsup_html = '<sup>%s%s</sup>' % (
                        significance_vs_others_symbol_html, str(int(logp)) if logp > 1 else '')

                # moved out of the above else: this was a bug!?
                z, p = testres
                if (nbtests * p) < 0.05 and data < 1. and z < 0.:
                    if not numpy.isinf(refalgert[j]):
                        tmpevals = algevals[i][j].copy()
                        tmpevals[numpy.isnan(tmpevals)] = algentries[i].maxevals[numpy.isnan(tmpevals)]
                        bestevals = refalgentry.detEvals(targets)
                        bestevals, bestalgalg = (bestevals[0][0], bestevals[1][0])
                        bestevals[numpy.isnan(bestevals)] = refalgentry.maxevals[bestalgalg][numpy.isnan(bestevals)]
                        tmpevals = numpy.array(sorted(tmpevals))[0:min(len(tmpevals), len(bestevals))]
                        bestevals = numpy.array(sorted(bestevals))[0:min(len(tmpevals), len(bestevals))]

                    # The conditions are now that ERT < ERT_best and
                    # all(sorted(FEvals_best) > sorted(FEvals_current)).
                    if numpy.isinf(refalgert[j]) or all(tmpevals < bestevals):
                        nbstars = -numpy.ceil(numpy.log10(nbtests * p))
                        # tmp2[-1] += r'$^{%s}$' % superscript
                        str_significance_subsup += r'_{%s%s}' % (
                            significance_vs_ref_symbol, str(int(nbstars)) if nbstars > 1 else '')
                        str_significance_subsup_html = '<sub>%s%s</sub>' % (
                            significance_vs_ref_symbol_html, str(int(nbstars)) if nbstars > 1 else '')

                if str_significance_subsup:
                    str_significance_subsup = '$%s$' % str_significance_subsup

                # format number in variable data
                if numpy.isnan(data):
                    curline.append(r'\multicolumn{2}{%s}{.}' % alignment)
                else:
                    if numpy.isinf(refalgert[j]):
                        curline.append(r'\multicolumn{2}{%s}{\textbf{%s}\mbox{\tiny (%s)}%s}'
                                       % (alignment,
                                          writeFEvalsMaxPrec(algerts[i][j], 2),
                                          writeFEvalsMaxPrec(dispersion, precdispersion),
                                          str_significance_subsup))
                        curlineHtml.append('<td sorttable_customkey="%f"><b>%s</b> (%s)%s</td>\n'
                                           % (algerts[i][j],
                                              writeFEvalsMaxPrec(algerts[i][j], 2),
                                              writeFEvalsMaxPrec(dispersion, precdispersion),
                                              str_significance_subsup_html))
                        continue

                    tmp = writeFEvalsMaxPrec(data, precfloat, maxfloatrepr=maxfloatrepr)
                    tmpHtml = writeFEvalsMaxPrec(data, precfloat, maxfloatrepr=maxfloatrepr)
                    sortKey = data
                    if data >= maxfloatrepr or data < 0.01:  # either inf or scientific notation
                        if numpy.isinf(data) and j == len(algerts[i]) - 1:
                            tmp += r'\,\textit{%s}' % writeFEvalsMaxPrec(algfinaldata[i][1], 0, maxfloatrepr=maxfloatrepr)
                            tmpHtml += '<i>%s</i>' % writeFEvalsMaxPrec(algfinaldata[i][1], 0, maxfloatrepr=maxfloatrepr)
                            sortKey = algfinaldata[i][1]
                        else:
                            tmp = writeFEvalsMaxPrec(data, precscien, maxfloatrepr=data)
                            if isBold:
                                tmpHtml = '<b>%s</b>' % tmp
                                tmp = r'\textbf{%s}' % tmp

                        if not numpy.isnan(dispersion):
                            tmpdisp = dispersion / refalgert[j]
                            if tmpdisp >= maxfloatrepr or tmpdisp < 0.005:  # TODO: hack
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=tmpdisp)
                            else:
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=maxfloatrepr)
                            tmp += r'\mbox{\tiny (%s)}' % tmpdisp
                            tmpHtml += ' (%s)' % tmpdisp
                        curline.append(r'\multicolumn{2}{%s}{%s%s}' % (alignment, tmp, str_significance_subsup))
                        tmpHtml = tmpHtml.replace('$\infty$', '∞')
                        if numpy.isinf(sortKey):
                            sortKey = sys.maxint
                        curlineHtml.append('<td sorttable_customkey="%f">%s%s</td>'
                                           % (sortKey, tmpHtml, str_significance_subsup_html))
                    else:
                        tmp2 = tmp.split('.', 1)
                        if len(tmp2) < 2:
                            tmp2.append('')
                        else:
                            tmp2[-1] = '.' + tmp2[-1]
                        if isBold:
                            tmp3 = []
                            tmp3html = []
                            for k in tmp2:
                                tmp3.append(r'\textbf{%s}' % k)
                                tmp3html.append('<b>%s</b>' % k)
                            tmp2 = tmp3
                            tmp2html = tmp3html
                        else:
                            tmp2html = []
                            tmp2html.extend(tmp2)
                        if not numpy.isnan(dispersion):
                            tmpdisp = dispersion / refalgert[j]
                            if tmpdisp >= maxfloatrepr or tmpdisp < 0.01:
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=tmpdisp)
                            else:
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=maxfloatrepr)
                            tmp2[-1] += (r'\mbox{\tiny (%s)}' % (tmpdisp))
                            tmp2html[-1] += ' (%s)' % tmpdisp
                        tmp2[-1] += str_significance_subsup
                        tmp2html[-1] += str_significance_subsup_html
                        curline.extend(tmp2)
                        tmp2html = ("").join(str(item) for item in tmp2html)
                        tmp2html = tmp2html.replace('$\infty$', '∞')
                        curlineHtml.append('<td sorttable_customkey="%f">%s</td>' % (data, tmp2html))

            curline.append('%d' % algnbsucc[i])
            curline.append('/%d' % algnbruns[i])
            table.append(curline)
            curlineHtml.append('<td sorttable_customkey="%d">%d/%d</td>\n'
                               % (algnbsucc[i], algnbsucc[i], algnbruns[i]))
            tableHtml.extend(curlineHtml[:])
            extraeol.append('')

        # Write table
        res = tableXLaTeX(table, spec=spec, extraeol=extraeol)
        try:
            filename = os.path.join(outputdir, 'pptables_f%03d_%02dD.tex' % (df[1], df[0]))
            f = open(filename, 'w')
            f.write(header + '\n')
            f.write(res)

            res = ("").join(str(item) for item in tableHtml)
            res = '\n<table class="sortable" style="width:800px ">\n%s</table>\n<p/>\n' % res

            if df[0] in (5, 20):
                filename = os.path.join(outputdir, genericsettings.many_algorithm_file_name + '.html')
                lines = []
                with open(filename) as infile:
                    for line in infile:
                        if '<!--' + 'pptablesf%03d%02dDHtml' % (df[1], df[0]) + '-->' in line:
                            lines.append(res)
                        lines.append(line)

                with open(filename, 'w') as outfile:
                    for line in lines:
                        outfile.write(line)

            if verbose:
                print 'Wrote table in %s' % filename
        except:
            raise
        else:
            f.close()
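

# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): how the table generator above might be driven. It assumes the
# bbob_pproc convention that ``pproc.processInputArgs`` returns
# ``(dsList, sortedAlgs, dictAlg)`` for a list of archived data folders;
# the folder names and output directory below are hypothetical.
def _example_pptables_usage(data_folders=('ALG-A_data', 'ALG-B_data'),
                            outputdir='ppdata'):
    # load the data of each algorithm; dictAlg maps folder name -> DataSetList
    dsList, sortedAlgs, dictAlg = pproc.processInputArgs(data_folders, verbose=True)
    # write one LaTeX/HTML table per (dimension, function) pair into outputdir
    main(dictAlg, sortedAlgs, isBiobjective=False, outputdir=outputdir)
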
def main(dsList0, dsList1, minfvalue=1e-8, outputdir='', verbose=True):
    """Generate the ERT1/ERT0 comparison figures."""

    #plt.rc("axes", labelsize=20, titlesize=24)
    #plt.rc("xtick", labelsize=20)
    #plt.rc("ytick", labelsize=20)
    #plt.rc("font", size=20)
    #plt.rc("legend", fontsize=20)

    # minfvalue = pproc.TargetValues.cast(minfvalue)

    funInfos = ppfigparam.read_fun_infos(dsList0.isBiobjective())

    dictFun0 = dsList0.dictByFunc()
    dictFun1 = dsList1.dictByFunc()

    for func in set.intersection(set(dictFun0), set(dictFun1)):
        dictDim0 = dictFun0[func].dictByDim()
        dictDim1 = dictFun1[func].dictByDim()

        filename = os.path.join(outputdir, 'ppfig2_f%03d' % (func))

        dims = sorted(set.intersection(set(dictDim0), set(dictDim1)))

        handles = []
        dataperdim = {}
        fvalueswitch = {}
        nbtests = 0
        for i, dim in enumerate(dimensions):
            try:
                entry0 = dictDim0[dim][0]
                entry1 = dictDim1[dim][0]
            except KeyError:
                continue

            nbtests += 1
            # generateData:
            data = _generateData(entry0, entry1, fthresh=fthresh)
            dataperdim[dim] = data

            if len(data[0]) == 0 and len(data[1]) == 0:
                continue

            # TODO: hack, modify slightly so line goes to 'zero'
            if minfvalue:
                for d in data:
                    tmp = d[:, 0]
                    tmp[tmp == 0] = min(min(tmp[tmp > 0]), minfvalue)**2

            # plot
            idx = np.isfinite(data[0][:, 1]) * np.isfinite(data[1][:, 1])
            ydata = data[1][idx, 1] / data[0][idx, 1]
            kwargs = styles[i].copy()
            kwargs['label'] = '%2d-D' % dim
            tmp = plotUnifLogXMarkers(data[0][idx, 0], ydata, nbperdecade=1,
                                      logscale=True, **kwargs)
            plt.setp(tmp, markersize=3*linewidth)
            plt.setp(tmp[0], ls='--')

            # This is only one possibility:
            #idx = (data[0][:, 3] >= 5) * (data[1][:, 3] >= 5)
            idx = ((data[0][:, 1] <= 3 * np.median(entry0.maxevals))
                   * (data[1][:, 1] <= 3 * np.median(entry1.maxevals)))

            if not idx.any():
                fvalueswitch[dim] = np.inf
                # Hack: fvalueswitch is the smallest value of f where the line
                # was still solid.
                continue

            fvalueswitch[dim] = min(data[0][idx, 0])
            ydata = data[1][idx, 1] / data[0][idx, 1]
            tmp = plotUnifLogXMarkers(data[0][idx, 0], ydata, nbperdecade=1,
                                      logscale=True, **styles[i])
            plt.setp(tmp[1], markersize=3*linewidth)

        beautify(xmin=minfvalue)
        #beautify()
        ax = plt.gca()
        # Freeze the boundaries
        ax.set_autoscale_on(False)
        #trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)

        # Plot everything else
        for i, dim in enumerate(dimensions):
            try:
                entry0 = dictDim0[dim][0]
                entry1 = dictDim1[dim][0]
                data = dataperdim[dim]
            except KeyError:
                continue

            if len(data[0]) == 0 and len(data[1]) == 0:
                continue

            # annotation
            annotate(entry0, entry1, dim, minfvalue, nbtests=nbtests)

            tmp0 = np.isfinite(data[0][:, 1])
            tmp1 = np.isfinite(data[1][:, 1])
            idx = tmp0 * tmp1

            if not idx.any():
                continue

            # Do not plot anything else if it happens after minfvalue
            if data[0][idx, 0][-1] <= minfvalue:
                # hack for the legend
                continue

            # Determine which algorithm went further
            algstoppedlast = 0
            algstoppedfirst = 1

            if np.sum(tmp0) < np.sum(tmp1):
                algstoppedlast = 1
                algstoppedfirst = 0

            # marker if an algorithm stopped
            ydata = data[1][idx, 1] / data[0][idx, 1]
            plt.plot((data[0][idx, 0][-1], ), (ydata[-1], ), marker='D', ls='',
                     color=styles[i]['color'], markeredgecolor=styles[i]['color'],
                     markerfacecolor=styles[i]['color'], markersize=4*linewidth)
            tmpy = ydata[-1]

            # plot probability of success line
            dataofinterest = data[algstoppedlast]

            tmp = np.nonzero(idx)[0][-1]  # Why [0]?
            # add the last line for which both algorithms still have a success
            idx = (data[algstoppedfirst][:, 2] == 0.) * (dataofinterest[:, 2] > 0.)
            idx[tmp] = True

            if np.sum(idx) <= 1:  # len(idx) == 0 or not idx.any():
                continue

            ymin, ymax = plt.ylim()
            #orientation = -1
            ybnd = ymin
            if algstoppedlast == 0:
                ybnd = ymax
                #orientation = 1

            #ydata = orientation * dataofinterest[idx, 2] / 2 + 0.5
            ydata = np.power(10, np.log10(ybnd) * (dataofinterest[idx, 2]
                             - offset * (5 - i) * np.log10(ymax/ymin) / np.abs(np.log10(ybnd))))

            ls = '-'
            if dataofinterest[idx, 0][0] < fvalueswitch[dim]:
                ls = '--'

            tmp = plt.plot([dataofinterest[idx, 0][0]]*2, (tmpy, ydata[0]), **styles[i])
            plt.setp(tmp, ls=ls, marker='')
            tmp = plt.plot((dataofinterest[idx, 0][0], ), (ydata[0], ), marker='D', ls='',
                           color=styles[i]['color'], markeredgecolor=styles[i]['color'],
                           markerfacecolor=styles[i]['color'], markersize=4*linewidth)

            kwargs = styles[i].copy()
            kwargs['ls'] = ls
            tmp = plotUnifLogXMarkers(dataofinterest[idx, 0], ydata, nbperdecade=1,
                                      logscale=True, **kwargs)
            plt.setp(tmp, markersize=3*linewidth)

            # Do not plot anything else if it happens after minfvalue
            if dataofinterest[idx, 0][-1] <= minfvalue:
                continue
            #plt.plot((dataofinterest[idx, 0][-1], ), (ydata[-1], ), marker='d',
            #         color=styles[i]['color'], markeredgecolor=styles[i]['color'],
            #         markerfacecolor=styles[i]['color'], markersize=4*linewidth)

        if func in funInfos.keys():
            plt.title(funInfos[func])

        if func in functions_with_legend:
            plt.legend(loc='best')

        # save
        saveFigure(filename, verbose=verbose)
        plt.close()
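

# Hedged usage sketch (added for illustration, not part of the original
# module): the ERT-ratio figure above compares exactly two algorithms. The
# inputs are assumed to be pproc.DataSetList instances already loaded by the
# caller; only the call itself follows the signature of ``main`` above.
def _example_ppfig2_usage(dsList0, dsList1, outputdir='ppdata'):
    # one ppfig2_fXXX figure is saved per function common to both data sets
    main(dsList0, dsList1, minfvalue=1e-8, outputdir=outputdir, verbose=True)
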
def main(dsList0, dsList1, outputdir, verbose=True):
    """Generate a scatter plot figure.

    TODO:

    """

    #plt.rc("axes", labelsize=24, titlesize=24)
    #plt.rc("xtick", labelsize=20)
    #plt.rc("ytick", labelsize=20)
    #plt.rc("font", size=20)
    #plt.rc("legend", fontsize=20)

    dictFunc0 = dsList0.dictByFunc()
    dictFunc1 = dsList1.dictByFunc()
    funcs = set(dictFunc0.keys()) & set(dictFunc1.keys())

    if isinstance(targets, pproc.RunlengthBasedTargetValues):
        linewidth = linewidth_rld_based
    else:
        linewidth = linewidth_default

    funInfos = ppfigparam.read_fun_infos(dsList0.isBiobjective())

    for f in funcs:
        dictDim0 = dictFunc0[f].dictByDim()
        dictDim1 = dictFunc1[f].dictByDim()
        dims = set(dictDim0.keys()) & set(dictDim1.keys())
        #set_trace()

        for i, d in enumerate(dimensions):
            try:
                entry0 = dictDim0[d][0]  # should be only one element
                entry1 = dictDim1[d][0]  # should be only one element
            except (IndexError, KeyError):
                continue

            if linewidth:  # plot all reliable ERT values as a line
                all_targets = np.array(sorted(set(entry0.target).union(entry1.target), reverse=True))
                assert entry0.detSuccessRates([all_targets[0]]) == 1.0
                assert entry1.detSuccessRates([all_targets[0]]) == 1.0
                all_targets = all_targets[np.where(all_targets <= targets((f, d))[0])[0]]
                xdata_all = np.array(entry0.detERT(all_targets))
                ydata_all = np.array(entry1.detERT(all_targets))

                # idx of reliable targets: last index where success rate >= 1/2 and ERT <= maxevals
                idx = []
                for ari in (np.where(entry0.detSuccessRates(all_targets) >= 0.5)[0],
                            np.where(entry1.detSuccessRates(all_targets) >= 0.5)[0],
                            np.where(xdata_all <= max(entry0.maxevals))[0],
                            np.where(ydata_all <= max(entry1.maxevals))[0]
                            ):
                    if len(ari):
                        idx.append(ari[-1])
                if len(idx) == 4:
                    max_idx = min(idx)
                    ## at least up to the most difficult given target
                    ## idx = max((idx, np.where(all_targets >= targets((f, d))[-1])[0][-1]))
                    xdata_all = xdata_all[:max_idx + 1]
                    ydata_all = ydata_all[:max_idx + 1]

                    idx = (numpy.isfinite(xdata_all)) * (numpy.isfinite(ydata_all))
                    assert idx.all()
                    if idx.any():
                        plt.plot(xdata_all[idx], ydata_all[idx], colors[i],
                                 ls='solid', lw=linewidth,
                                 # TODO: ls has changed, check whether this works out
                                 clip_on=False)

            xdata = numpy.array(entry0.detERT(targets((f, d))))
            ydata = numpy.array(entry1.detERT(targets((f, d))))

            # plot "valid" data, those within maxevals
            idx = np.logical_and(xdata < entry0.mMaxEvals(),
                                 ydata < entry1.mMaxEvals())
            # was:
            #     ((numpy.isinf(xdata) == False) *
            #      (numpy.isinf(ydata) == False) *
            #      (xdata < entry0.mMaxEvals()) *
            #      (ydata < entry1.mMaxEvals()))
            if idx.any():
                try:
                    plt.plot(xdata[idx], ydata[idx], ls='',
                             markersize=markersize,
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=3,
                             clip_on=False)
                except KeyError:
                    plt.plot(xdata[idx], ydata[idx], ls='', markersize=markersize,
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=3,
                             clip_on=False)
                #try:
                #    plt.scatter(xdata[idx], ydata[idx], s=10, marker=markers[i],
                #                facecolor='None', edgecolor=colors[i], linewidth=3)
                #except ValueError:
                #    set_trace()

            # plot beyond maxevals but finite data
            idx = ((numpy.isinf(xdata) == False) *
                   (numpy.isinf(ydata) == False) *
                   np.logical_or(xdata >= entry0.mMaxEvals(),
                                 ydata >= entry1.mMaxEvals()))
            if idx.any():
                try:
                    plt.plot(xdata[idx], ydata[idx], ls='',
                             markersize=markersize + markersize_addon_beyond_maxevals,
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             clip_on=False)
                except KeyError:
                    plt.plot(xdata[idx], ydata[idx], ls='', markersize=markersize,
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             clip_on=False)

            #ax = plt.gca()
            ax = plt.axes()

            # plot data on the right edge
            idx = numpy.isinf(xdata) * (numpy.isinf(ydata) == False)
            if idx.any():
                # This (seems to) transform inf to the figure limits!?
                trans = blend(ax.transAxes, ax.transData)
                #plt.scatter([1.]*numpy.sum(idx), ydata[idx], s=10, marker=markers[i],
                #            facecolor='None', edgecolor=colors[i], linewidth=3,
                #            transform=trans)
                try:
                    plt.plot([1.]*numpy.sum(idx), ydata[idx],
                             markersize=markersize + markersize_addon_beyond_maxevals, ls='',
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             transform=trans, clip_on=False)
                except KeyError:
                    plt.plot([1.]*numpy.sum(idx), ydata[idx],
                             markersize=markersize, ls='',
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             transform=trans, clip_on=False)
                #set_trace()

            # plot data on the left edge
            idx = (numpy.isinf(xdata) == False) * numpy.isinf(ydata)
            if idx.any():
                # This (seems to) transform inf to the figure limits!?
                trans = blend(ax.transData, ax.transAxes)
                #plt.scatter(xdata[idx], [1.-offset]*numpy.sum(idx), s=10, marker=markers[i],
                #            facecolor='None', edgecolor=colors[i], linewidth=3,
                #            transform=trans)
                try:
                    plt.plot(xdata[idx], [1.-offset]*numpy.sum(idx),
                             markersize=markersize + markersize_addon_beyond_maxevals, ls='',
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             transform=trans, clip_on=False)
                except KeyError:
                    plt.plot(xdata[idx], [1.-offset]*numpy.sum(idx),
                             markersize=markersize, ls='',
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             transform=trans, clip_on=False)

            # plot data in the top corner
            idx = numpy.isinf(xdata) * numpy.isinf(ydata)
            if idx.any():
                #plt.scatter(xdata[idx], [1.-offset]*numpy.sum(idx), s=10, marker=markers[i],
                #            facecolor='None', edgecolor=colors[i], linewidth=3,
                #            transform=trans)
                try:
                    plt.plot([1.-offset]*numpy.sum(idx), [1.-offset]*numpy.sum(idx),
                             markersize=markersize + markersize_addon_beyond_maxevals, ls='',
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             transform=ax.transAxes, clip_on=False)
                except KeyError:
                    plt.plot([1.-offset]*numpy.sum(idx), [1.-offset]*numpy.sum(idx),
                             markersize=markersize, ls='',
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             transform=ax.transAxes, clip_on=False)
                #set_trace()

        beautify()

        for i, d in enumerate(dimensions):
            try:
                entry0 = dictDim0[d][0]  # should be only one element
                entry1 = dictDim1[d][0]  # should be only one element
            except (IndexError, KeyError):
                continue

            minbnd, maxbnd = plt.xlim()
            plt.plot((entry0.mMaxEvals(), entry0.mMaxEvals()),
                     # (minbnd, entry1.mMaxEvals()), ls='-', color=colors[i],
                     (max([minbnd, entry1.mMaxEvals()/max_evals_line_length]), entry1.mMaxEvals()),
                     ls='-', color=colors[i],
                     zorder=-1)
            plt.plot(# (minbnd, entry0.mMaxEvals()),
                     (max([minbnd, entry0.mMaxEvals()/max_evals_line_length]), entry0.mMaxEvals()),
                     (entry1.mMaxEvals(), entry1.mMaxEvals()),
                     ls='-', color=colors[i], zorder=-1)
            plt.xlim(minbnd, maxbnd)
            plt.ylim(minbnd, maxbnd)
            # Set the boundaries again: they changed due to new plots.
            #plt.axvline(entry0.mMaxEvals(), ls='--', color=colors[i])
            #plt.axhline(entry1.mMaxEvals(), ls='--', color=colors[i])

        if f in funInfos.keys():
            plt.ylabel(funInfos[f])

        filename = os.path.join(outputdir, 'ppscatter_f%03d' % f)
        saveFigure(filename, verbose=verbose)

        if f == 1:
            algName1 = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(entry1.algId))
            algName0 = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(entry0.algId))
            save_single_functions_html(
                os.path.join(outputdir, genericsettings.two_algorithm_file_name),
                "%s vs %s" % (algName1, algName0),
                algorithmCount=AlgorithmCount.TWO)

        plt.close()
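

# Hedged usage sketch (added for illustration, not part of the original
# module): the two-algorithm scatter-plot driver above. Loading via
# ``pproc.DataSetList`` and the folder names are assumptions mirroring the
# surrounding bbob_pproc code, not a verified recipe.
def _example_ppscatter_usage(folder0='ALG-A_data', folder1='ALG-B_data',
                             outputdir='ppdata'):
    dsList0 = pproc.DataSetList([folder0])  # runs of the first algorithm
    dsList1 = pproc.DataSetList([folder1])  # runs of the second algorithm
    # one ppscatter_fXXX figure is saved per function present in both data sets
    main(dsList0, dsList1, outputdir, verbose=True)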