def rearrange(blist, flist):
    """Align the number of evaluations taken from ``blist`` with the
    corresponding ``flist``.

    :param blist: per-dimension sequence; ``blist[i][j]`` is a number of
        function evaluations
    :param flist: per-dimension sequence; ``flist[i][j]`` holds the
        function values recorded at ``blist[i][j]`` evaluations
    :returns: tuple ``(final_b, final_f)`` where ``final_b[i]`` is a flat
        array of evaluation counts and ``final_f[i]`` is a 3-element list
        of arrays with the median, lower and upper quartile of the
        corresponding function values
    """
    final_b = []
    final_f = []
    for i in range(len(blist)):  # runs over dimensions
        erg_b = numpy.empty((0), float)
        erg_f = [numpy.empty((0), float), numpy.empty((0), float),
                 numpy.empty((0), float)]
        for j in range(len(blist[i])):  # runs over function evaluations
            erg_b = numpy.append(erg_b, blist[i][j])
            erg_f[0] = numpy.append(erg_f[0], numpy.median(flist[i][j]))
            # NOTE(review): the arguments [0.25] and [0.75] look like
            # fractions, while other call sites in this code base pass
            # percentiles in [0, 100] (e.g. [25, 50, 75]) -- confirm the
            # expected scale of prctile's second argument.
            erg_f[1] = numpy.append(erg_f[1], prctile(flist[i][j], [0.25]))
            erg_f[2] = numpy.append(erg_f[2], prctile(flist[i][j], [0.75]))
        final_b.append(erg_b)
        final_f.append(erg_f)
    return final_b, final_f
def rearrange(blist, flist):
    """Align the number of evaluations taken from ``blist`` with the
    corresponding ``flist``.

    For every dimension, flatten the evaluation counts into one array and
    compute, per evaluation entry, the median and the two quartiles of
    the matching function values.
    """
    final_b = []
    final_f = []
    # iterate dimensions in lockstep over both inputs
    for b_per_dim, f_per_dim in zip(blist, flist):
        evals = numpy.empty((0), float)
        stats = [numpy.empty((0), float),
                 numpy.empty((0), float),
                 numpy.empty((0), float)]
        # iterate the recorded function evaluations of this dimension
        for b_entry, f_entry in zip(b_per_dim, f_per_dim):
            evals = numpy.append(evals, b_entry)
            stats[0] = numpy.append(stats[0], numpy.median(f_entry))
            stats[1] = numpy.append(stats[1], prctile(f_entry, [0.25]))
            stats[2] = numpy.append(stats[2], prctile(f_entry, [0.75]))
        final_b.append(evals)
        final_f.append(stats)
    return final_b, final_f
def generateData(dataSet, targetFuncValue):
    """Returns an array of results to be plotted.

    1st column is ert, 2nd is the number of success, 3rd the success
    rate, 4th the sum of the number of function evaluations, and
    finally the median on successful runs.
    """
    res = []
    data = []

    # Walk the evals rows from the last one backwards; i[0] is the
    # function-value column, i[1:] the per-trial evaluation counts.
    # prev ends up as the first row whose function value reaches
    # targetFuncValue, or all-nan if no row does (presumably rows are
    # ordered by decreasing function value -- confirm against DataSet).
    it = iter(reversed(dataSet.evals))
    i = it.next()
    prev = np.array([np.nan] * len(i))

    while i[0] <= targetFuncValue:
        prev = i
        try:
            i = it.next()
        except StopIteration:
            break

    data = prev[1:].copy()  # keep only the number of function evaluations.
    succ = (np.isnan(data) == False)
    if succ.any():
        med = toolsstats.prctile(data[succ], 50)[0]
        #Line above was modified at rev 3050 to make sure that we consider only
        #successful trials in the median
    else:
        med = np.nan

    # unsuccessful trials are counted with their full evaluation budget
    data[np.isnan(data)] = dataSet.maxevals[np.isnan(data)]

    res = []
    res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
    res.append(np.mean(data))  #mean(FE)
    res.append(med)

    return np.array(res)
def generateData(dataSet, target):
    """Returns an array of results to be plotted.

    Oth column is ert, 1st is the success rate, 2nd the number of
    successes, 3rd the mean of the number of function evaluations, and
    4th the median of number of function evaluations of successful
    runs or numpy.nan.
    """
    res = []
    # evaluation counts per trial at the given target; nan marks an
    # unsuccessful trial
    data = dataSet.detEvals([target])[0]
    succ = (numpy.isnan(data) == False)
    # unsuccessful trials are counted with their full evaluation budget
    data[numpy.isnan(data)] = dataSet.maxevals[numpy.isnan(data)]
    res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
    res.append(numpy.mean(data))
    if res[2] > 0:  # res[2] is the number of successes (see docstring)
        res.append(toolsstats.prctile(data[succ], 50)[0])
    else:
        res.append(numpy.nan)
    # NOTE(review): this overwrites the mean appended above with the
    # overall maximum of maxevals, contradicting the docstring's "3rd the
    # mean of the number of function evaluations" -- confirm intent.
    res[3] = numpy.max(dataSet.maxevals)
    return res
# Plot target precision versus function evaluations # (swap x-y of previous figure) figure() for i in range(0, nbruns): loglog(evals[:, i], targets) grid() xlabel('Function Evaluations') ylabel('Targets') loglog(d.ert[d.target>=1e-8], d.target[d.target>=1e-8], lw=3, color='r', label='ert') legend() # Plot target precision versus function evaluations with error bars figure() # open a new figure from bbob_pproc.toolsstats import prctile q = array(list(prctile(i, [25, 50, 75]) for i in evals)) xmed = q[:, 1] xlow = xmed - q[:, 0] xhig = q[:, 2] - xmed xerr = vstack((xlow, xhig)) errorbar(xmed, targets, xerr=xerr, color='r', label='Median') xscale('log') yscale('log') xlabel('Function Evaluations') ylabel('Targets') grid() legend() # Empirical cumulative distribution function figure from bbob_pproc import pprldistr ds = bb.load(glob.glob('BBOB2009pythondata/BIPOP-CMA-ES/ppdata_f0*_20.pickle'))
def plot2(dataset, **kwargs):
    """Plot function values versus function evaluations.

    Plus (+) markers for final function values and run lengths and
    dashed lines. Lines for the max and min and median. Plot markers
    for all quartiles when:

    * the first run stops
    * the median run stops
    * the maximum number of evaluations is reached
    * at 1/2 of the first run stops
    """
    def _findfrombelow(data, x, log=True):
        # largest element of data that is <= x; comparison is done in
        # log scale by default
        if log:
            tmpdata = np.log(data)
            x = np.log(x)
        else:
            tmpdata = data
        return max(data[tmpdata - x <= 0])

    prctiles = np.array((0, 25, 50, 75, 100))
    medidx = 2  # index of the 50th percentile in prctiles

    # get data: quartiles of the function values over all runs, one row
    # per recorded number of evaluations
    data = []
    for i in dataset.funvals[:, 1:]:
        data.append(toolsstats.prctile(i, prctiles))
    data = np.asarray(data)
    xdata = dataset.funvals[:, 0]  # numbers of function evaluations

    res = []
    # plot the median curve up to the median run length
    res.extend(plt.plot(xdata[xdata <= np.median(dataset.maxevals)],
                        data[xdata <= np.median(dataset.maxevals), medidx],
                        **kwargs))
    # reuse the median line's properties for every other element
    props = dict((i, plt.getp(res[-1], i)) for i in lineprops)
    # minimum curve up to the shortest run; maximum curve over all data
    res.extend(plt.plot(xdata[xdata <= min(dataset.maxevals)],
                        data[xdata <= min(dataset.maxevals), 0], **props))
    res.extend(plt.plot(xdata, data[:, -1], **props))
    for i in res:
        plt.setp(i, 'marker', '')
    finalpoints = np.vstack((dataset.maxevals, dataset.finalfunvals)).T
    sortedfpoints = np.vstack(sorted(finalpoints,
                                     cmp=lambda x, y: cmp(list(x), list(y))))
    # sort final points like a list of 2-element sequences
    res.extend(plt.plot(sortedfpoints[:, 0], sortedfpoints[:, 1], **props))
    plt.setp(res[-1], marker='+', markeredgewidth=props['linewidth'],
             markersize=5*props['linewidth'], linestyle='')

    # abscissae where the quartile markers are drawn (see docstring)
    xmarkers = (_findfrombelow(xdata, min(dataset.maxevals) ** .5),
                min(dataset.maxevals),
                _findfrombelow(xdata, np.median(dataset.maxevals)),
                max(dataset.maxevals))
    xmarkersprctile = (0, 0, 50, 100)
    for x, p in zip(xmarkers, xmarkersprctile):
        tmp = (p <= prctiles)  # mark every quartile from p upwards
        res.extend(plt.plot(np.sum(tmp) * [x], data[xdata == x, tmp], **props))
        plt.setp(res[-1], linestyle='')
    return res
def plot2(dataset, **kwargs):
    """Plot function values versus function evaluations.

    Plus (+) markers for final function values and run lengths and
    dashed lines. Lines for the max and min and median. Plot markers
    for all quartiles when:

    * the first run stops
    * the median run stops
    * the maximum number of evaluations is reached
    * at 1/2 of the first run stops
    """
    def _findfrombelow(data, x, log=True):
        # largest element of data that is <= x; comparison is done in
        # log scale by default
        if log:
            tmpdata = np.log(data)
            x = np.log(x)
        else:
            tmpdata = data
        return max(data[tmpdata - x <= 0])

    prctiles = np.array((0, 25, 50, 75, 100))
    medidx = 2  # index of the 50th percentile in prctiles

    # get data: quartiles of the function values over all runs, one row
    # per recorded number of evaluations
    data = []
    for i in dataset.funvals[:, 1:]:
        data.append(toolsstats.prctile(i, prctiles))
    data = np.asarray(data)
    xdata = dataset.funvals[:, 0]  # numbers of function evaluations

    res = []
    # plot the median curve up to the median run length
    res.extend(
        plt.plot(xdata[xdata <= np.median(dataset.maxevals)],
                 data[xdata <= np.median(dataset.maxevals), medidx],
                 **kwargs))
    # reuse the median line's properties for every other element
    props = dict((i, plt.getp(res[-1], i)) for i in lineprops)
    # minimum curve up to the shortest run; maximum curve over all data
    res.extend(
        plt.plot(xdata[xdata <= min(dataset.maxevals)],
                 data[xdata <= min(dataset.maxevals), 0], **props))
    res.extend(plt.plot(xdata, data[:, -1], **props))
    for i in res:
        plt.setp(i, 'marker', '')
    finalpoints = np.vstack((dataset.maxevals, dataset.finalfunvals)).T
    sortedfpoints = np.vstack(
        sorted(finalpoints, cmp=lambda x, y: cmp(list(x), list(y))))
    # sort final points like a list of 2-element sequences
    res.extend(plt.plot(sortedfpoints[:, 0], sortedfpoints[:, 1], **props))
    plt.setp(res[-1], marker='+', markeredgewidth=props['linewidth'],
             markersize=5 * props['linewidth'], linestyle='')

    # abscissae where the quartile markers are drawn (see docstring)
    xmarkers = (_findfrombelow(xdata, min(dataset.maxevals)**.5),
                min(dataset.maxevals),
                _findfrombelow(xdata, np.median(dataset.maxevals)),
                max(dataset.maxevals))
    xmarkersprctile = (0, 0, 50, 100)
    for x, p in zip(xmarkers, xmarkersprctile):
        tmp = (p <= prctiles)  # mark every quartile from p upwards
        res.extend(plt.plot(np.sum(tmp) * [x], data[xdata == x, tmp], **props))
        plt.setp(res[-1], linestyle='')
    return res
def plot(dsList, param='dim', targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)):
    """Generate plot of ERT vs param."""
    dictparam = dsList.dictByParam(param)
    params = sorted(dictparam)  # sorted because we draw lines

    # generate plot from dsList
    res = []
    # collect data: per param value, evaluation counts for each target
    rawdata = {}
    for p in params:
        assert len(dictparam[p]) == 1
        rawdata[p] = dictparam[p][0].detEvals(targets)
        # expect dictparam[p] to have only one element

    # plot lines for ERT
    xpltdata = params
    for i, t in enumerate(targets):
        ypltdata = []
        for p in params:
            data = rawdata[p][i]
            unsucc = np.isnan(data)
            assert len(dictparam[p]) == 1
            # unsuccessful trials are counted with their full budget
            data[unsucc] = dictparam[p][0].maxevals
            # compute ERT
            ert, srate, succ = toolsstats.sp(data,
                                             issuccessful=(unsucc == False))
            ypltdata.append(ert)
        res.extend(
            plt.plot(xpltdata,
                     ypltdata,
                     markersize=20,
                     zorder=len(targets) - i,
                     **styles[i]))
        # for the legend
        plt.plot([], [],
                 markersize=10,
                 label=' %+d' % (np.log10(targets[i])),
                 **styles[i])

    # plot median of successful runs for hardest target with a success
    for p in params:
        for i, t in enumerate(reversed(
                targets)):  # targets has to be from hardest to easiest
            # NOTE(review): i indexes the reversed enumeration while
            # rawdata[p] is in the original target order -- confirm this
            # pairing (and the use of styles[i]) is intended.
            data = rawdata[p][i]
            data = data[np.isnan(data) == False]
            if len(data) > 0:
                median = toolsstats.prctile(data, 50.)[0]
                res.extend(plt.plot(p, median, styles[i]['color'],
                                    **medmarker))
                break

    # plot average number of function evaluations for the hardest target
    xpltdata = []
    ypltdata = []
    for p in params:
        data = rawdata[p][0]  # first target
        xpltdata.append(p)
        if (np.isnan(data) == False).all():
            tmpdata = data.copy()
            assert len(dictparam[p]) == 1
            tmpdata[np.isnan(data)] = dictparam[p][0].maxevals[np.isnan(data)]
            tmp = np.mean(tmpdata)
        else:
            tmp = np.nan  # Check what happens when plotting NaN
        ypltdata.append(tmp)
    res.extend(plt.plot(xpltdata, ypltdata, **avgstyle))

    # display numbers of successes for hardest target where there is still one success
    for p in params:
        for i, t in enumerate(
                targets):  # targets has to be from hardest to easiest
            data = rawdata[p][i]
            unsucc = np.isnan(data)
            assert len(dictparam[p]) == 1
            data[unsucc] = dictparam[p][0].maxevals
            # compute ERT
            ert, srate, succ = toolsstats.sp(data,
                                             issuccessful=(unsucc == False))
            if srate == 1.:
                break
            elif succ > 0:
                # annotate the point with its number of successes
                res.append(
                    plt.text(p,
                             ert * 1.85,
                             "%d" % succ,
                             axes=plt.gca(),
                             horizontalalignment="center",
                             verticalalignment="bottom"))
                break
    return res
def generateTable(dsList, CrE=0., outputdir='.', info='default', verbose=True):
    """Generates ERT loss ratio tables.

    :param DataSetList dsList: input data set
    :param float CrE: crafting effort (see COCO documentation)
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity
    """
    #Set variables
    prcOfInterest = [0, 10, 25, 50, 75, 90]  # percentiles shown as columns
    for d, dsdim in dsList.dictByDim().iteritems():
        res = []
        maxevals = []
        funcs = []
        mFE = []
        for i in dsdim:
            # largest budget with a finite ERT for this data set
            maxevals.append(max(i.ert[numpy.isinf(i.ert)==False]))
            funcs.append(i.funcId)
            mFE.append(max(i.maxevals))
        maxevals = max(maxevals)
        mFE = max(mFE)
        # row budgets: 2*d, then powers of ten times d up to maxevals/d
        EVALS = [2.*d]
        EVALS.extend(numpy.power(10., numpy.arange(1, numpy.log10(1e-9 + maxevals*1./d)))*d)
        #Set variables: Done
        data = generateData(dsList, EVALS, CrE)
        # LaTeX caption line for this dimension's table
        tmp = "\\textbf{\\textit{f}\\raisebox{-0.35ex}{%d}--\\textit{f}\\raisebox{-0.35ex}{%d} in %d-D}, maxFE/D=%s" \
              % (min(funcs), max(funcs), d, writeFEvals2(int(mFE/d), maxdigits=6))
        res.append(r" & \multicolumn{" + str(len(prcOfInterest)) + "}{|c}{" + tmp + "}")
        header = ["\\#FEs/D"]
        for i in prcOfInterest:
            if i == 0:
                tmp = "best"
            elif i == 50:
                tmp = "\\textbf{med}"
            else:
                tmp = "%d\\%%" % i
            header.append(tmp)
        #set_trace()
        res.append(" & ".join(header))
        for i in range(len(EVALS)):
            # percentiles over all functions at budget EVALS[i]
            tmpdata = list(data[f][i] for f in data)
            #set_trace()
            tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
            # format entries
            #tmp = [writeFEvals(EVALS[i]/d, '.0')]
            if EVALS[i]/d < 200:
                tmp = [writeFEvals2(EVALS[i]/d, 3)]
            else:
                tmp = [writeFEvals2(EVALS[i]/d, 1)]
            for j in tmpdata:
                # tmp.append(writeFEvals(j, '.2'))
                # tmp.append(writeFEvals2(j, 2))
                if j == 0.:
                    tmp.append("~\\,0")
                elif j < 1:
                    tmp.append("~\\,%1.2f" % j)
                elif j < 10:
                    tmp.append("\\hspace*{1ex}%1.1f" % j)
                elif j < 100:
                    tmp.append("%2.0f" % j)
                else:
                    ar = ("%1.1e" % j).split('e')
                    tmp.append(ar[0] + 'e' + str(int(ar[1])))
                # print tmp[-1]
            res.append(" & ".join(tmp))

        # add last line: runlength distribution for which 1e-8 was not reached.
        tmp = [r"$\text{RL}_{\text{US}}$/D"]
        tmpdata = []
        # NOTE(review): this iterates dsList (all dimensions), not dsdim --
        # confirm whether the footer should be restricted to dimension d.
        for i in dsList:
            # scan rows from the end back to the first row at/below f_thresh
            # (f_thresh is a module-level name not visible in this block)
            it = reversed(i.evals)
            curline = None
            nextline = it.next()
            while nextline[0] <= f_thresh:
                curline = nextline[1:]
                nextline = it.next()
            if curline is None:
                tmpdata.extend(i.maxevals)  # no trial reached the threshold
            else:
                tmpdata.extend(i.maxevals[numpy.isnan(curline)])
        #set_trace()
        if tmpdata: # if it is not empty
            tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
            for j in tmpdata:
                tmp.append(writeFEvals2(j/d, 1))
            res.append(" & ".join(tmp))

        res = (r"\\"+ "\n").join(res)
        res = r"\begin{tabular}{c|" + len(prcOfInterest) * "l" +"}\n" + res
        #res = r"\begin{tabular}{ccccc}" + "\n" + res
        res = res + "\n" + r"\end{tabular}" + "\n"
        filename = os.path.join(outputdir, 'pploglosstable_%02dD_%s.tex' % (d, info))
        f = open(filename, 'w')
        f.write(res)
        f.close()
        if verbose:
            print "Wrote ERT loss ratio table in %s." % filename
def generateSingleTableHtml(dsList, funcs, mFE, d, prcOfInterest, EVALS, data,
                            outputdir='.', info='default', verbose=True):
    """Generates single ERT loss ratio table.

    :param DataSetList dsList: input data set
    :param funcs: function ids; only min and max are used, for the caption
    :param mFE: overall maximum number of function evaluations (caption)
    :param d: problem dimension
    :param prcOfInterest: percentiles shown as table columns
    :param EVALS: budgets (numbers of evaluations) defining the table rows
    :param data: mapping function id -> per-budget values to summarize
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity
    """
    res = []
    # NOTE(review): the opening <th> cells below are closed with </td> --
    # looks like an HTML typo; confirm before changing the output.
    header = ["<thead>\n<tr>\n<th>#FEs/D</td>\n"]
    for i in prcOfInterest:
        if i == 0:
            tmp = "best"
        elif i == 50:
            tmp = "med"
        else:
            tmp = "%d %%" % i
        header.append("<td>%s</td>\n" % tmp)
    #set_trace()
    res.append("".join(header))
    res.append("</tr>\n</thead>\n")

    # add footer line: runlength distribution for which 1e-8 was not reached.
    res.append("<tfoot>\n<tr>\n")
    tmp = ["<th>RL<sub>US</sub>/D</td>\n"]
    tmpdata = []
    for i in dsList:
        # scan rows from the end back to the first row at/below f_thresh
        # (f_thresh is a module-level name not visible in this block)
        it = reversed(i.evals)
        curline = None
        nextline = it.next()
        while nextline[0] <= f_thresh:
            curline = nextline[1:]
            nextline = it.next()
        if curline is None:
            tmpdata.extend(i.maxevals)  # no trial reached the threshold
        else:
            tmpdata.extend(i.maxevals[np.isnan(curline)])
    #set_trace()
    if tmpdata: # if it is not empty
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        for j in tmpdata:
            tmp.append("<td>%s</td>\n" % writeFEvals2(j/d, 1))
    res.append("".join(tmp))
    res.append("</tr>\n</tfoot>\n")

    # add data
    res.append("<tbody>\n")
    for i in range(len(EVALS)):
        # percentiles over all functions at budget EVALS[i]
        tmpdata = list(data[f][i] for f in data)
        #set_trace()
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        res.append("<tr>\n")
        # format entries
        #tmp = [writeFEvals(EVALS[i]/d, '.0')]
        if EVALS[i]/d < 200:
            tmp = writeFEvals2(EVALS[i]/d, 3)
        else:
            tmp = writeFEvals2(EVALS[i]/d, 1)
        tmp = ["<th sorttable_customkey=\"%f\">%s</th>\n" % ((EVALS[i]/d), tmp)]
        for j in tmpdata:
            # tmp.append(writeFEvals(j, '.2'))
            # tmp.append(writeFEvals2(j, 2))
            if j == 0.:
                tmp1 = "0"
            elif j < 1:
                tmp1 = "%1.2f" % j
            elif j < 10:
                tmp1 = "%1.1f" % j
            elif j < 100:
                tmp1 = "%2.0f" % j
            else:
                ar = ("%1.1e" % j).split('e')
                tmp1 = ar[0] + 'e' + str(int(ar[1]))
            tmp.append("<td sorttable_customkey=\"%f\">%s</td>\n" % (j, tmp1))
        res.append("".join(tmp))
        res.append("</tr>\n")
    res.append("</tbody>\n")

    res = ("").join(res)
    function = "<p><b><i>f</i><sub>%d</sub>–<i>f</i><sub>%d</sub> in %d-D</b>, maxFE/D=%s</p>\n" % (min(funcs), max(funcs), d, writeFEvals2(int(mFE/d), maxdigits=6))
    res = function + "<table class=\"sortable\">\n" + res
    res = res + "</table>\n"

    # splice the table into the existing HTML page at the marker comment
    filename = os.path.join(outputdir, genericsettings.single_algorithm_file_name + '.html')
    lines = []
    with open(filename) as infile:
        for line in infile:
            if '<!--tables-->' in line:
                lines.append(res)
            lines.append(line)
    with open(filename, 'w') as outfile:
        for line in lines:
            outfile.write(line)
    if verbose:
        print "Wrote ERT loss ratio table in %s." % filename
def generateSingleTableTex(dsList, funcs, mFE, d, prcOfInterest, EVALS, data,
                           outputdir='.', info='default', verbose=True):
    """Generates single ERT loss ratio table.

    :param DataSetList dsList: input data set
    :param funcs: function ids; only min and max are used, for the caption
    :param mFE: overall maximum number of function evaluations (caption)
    :param d: problem dimension
    :param prcOfInterest: percentiles shown as table columns
    :param EVALS: budgets (numbers of evaluations) defining the table rows
    :param data: mapping function id -> per-budget values to summarize
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity
    """
    res = []
    # LaTeX caption line
    tmp = "\\textbf{\\textit{f}\\raisebox{-0.35ex}{%d}--\\textit{f}\\raisebox{-0.35ex}{%d} in %d-D}, maxFE/D=%s" \
          % (min(funcs), max(funcs), d, writeFEvals2(int(mFE/d), maxdigits=6))
    res.append(r" & \multicolumn{" + str(len(prcOfInterest)) + "}{|c}{" + tmp + "}")
    header = ["\\#FEs/D"]
    for i in prcOfInterest:
        if i == 0:
            tmp = "best"
        elif i == 50:
            tmp = "\\textbf{med}"
        else:
            tmp = "%d\\%%" % i
        header.append(tmp)
    #set_trace()
    res.append(" & ".join(header))
    for i in range(len(EVALS)):
        # percentiles over all functions at budget EVALS[i]
        tmpdata = list(data[f][i] for f in data)
        #set_trace()
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        # format entries
        #tmp = [writeFEvals(EVALS[i]/d, '.0')]
        if EVALS[i]/d < 200:
            tmp = [writeFEvals2(EVALS[i]/d, 3)]
        else:
            tmp = [writeFEvals2(EVALS[i]/d, 1)]
        for j in tmpdata:
            # tmp.append(writeFEvals(j, '.2'))
            # tmp.append(writeFEvals2(j, 2))
            if j == 0.:
                tmp.append("~\\,0")
            elif j < 1:
                tmp.append("~\\,%1.2f" % j)
            elif j < 10:
                tmp.append("\\hspace*{1ex}%1.1f" % j)
            elif j < 100:
                tmp.append("%2.0f" % j)
            else:
                ar = ("%1.1e" % j).split('e')
                tmp.append(ar[0] + 'e' + str(int(ar[1])))
            # print tmp[-1]
        res.append(" & ".join(tmp))

    # add last line: runlength distribution for which 1e-8 was not reached.
    tmp = [r"$\text{RL}_{\text{US}}$/D"]
    tmpdata = []
    for i in dsList:
        # scan rows from the end back to the first row at/below f_thresh
        # (f_thresh is a module-level name not visible in this block)
        it = reversed(i.evals)
        curline = None
        nextline = it.next()
        while nextline[0] <= f_thresh:
            curline = nextline[1:]
            nextline = it.next()
        if curline is None:
            tmpdata.extend(i.maxevals)  # no trial reached the threshold
        else:
            tmpdata.extend(i.maxevals[np.isnan(curline)])
    #set_trace()
    if tmpdata: # if it is not empty
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        for j in tmpdata:
            tmp.append(writeFEvals2(j/d, 1))
        res.append(" & ".join(tmp))

    res = (r"\\"+ "\n").join(res)
    res = r"\begin{tabular}{c|" + len(prcOfInterest) * "l" +"}\n" + res
    #res = r"\begin{tabular}{ccccc}" + "\n" + res
    res = res + "\n" + r"\end{tabular}" + "\n"
    filename = os.path.join(outputdir, 'pploglosstable_%02dD_%s.tex' % (d, info))
    f = open(filename, 'w')
    f.write(res)
    f.close()
    if verbose:
        print "Wrote ERT loss ratio table in %s." % filename
def generateSingleTableHtml(dsList, funcs, mFE, d, prcOfInterest, EVALS, data,
                            outputdir='.', info='default', verbose=True):
    """Generates single ERT loss ratio table.

    :param DataSetList dsList: input data set
    :param funcs: function ids; only min and max are used, for the caption
    :param mFE: overall maximum number of function evaluations (caption)
    :param d: problem dimension
    :param prcOfInterest: percentiles shown as table columns
    :param EVALS: budgets (numbers of evaluations) defining the table rows
    :param data: mapping function id -> per-budget values to summarize
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity
    """
    res = []
    # NOTE(review): the opening <th> cells below are closed with </td> --
    # looks like an HTML typo; confirm before changing the output.
    header = ["<thead>\n<tr>\n<th>#FEs/D</td>\n"]
    for i in prcOfInterest:
        if i == 0:
            tmp = "best"
        elif i == 50:
            tmp = "med"
        else:
            tmp = "%d %%" % i
        header.append("<td>%s</td>\n" % tmp)
    #set_trace()
    res.append("".join(header))
    res.append("</tr>\n</thead>\n")

    # add footer line: runlength distribution for which 1e-8 was not reached.
    res.append("<tfoot>\n<tr>\n")
    tmp = ["<th>RL<sub>US</sub>/D</td>\n"]
    tmpdata = []
    for i in dsList:
        # scan rows from the end back to the first row at/below f_thresh
        # (f_thresh is a module-level name not visible in this block)
        it = reversed(i.evals)
        curline = None
        nextline = it.next()
        while nextline[0] <= f_thresh:
            curline = nextline[1:]
            nextline = it.next()
        if curline is None:
            tmpdata.extend(i.maxevals)  # no trial reached the threshold
        else:
            tmpdata.extend(i.maxevals[np.isnan(curline)])
    #set_trace()
    if tmpdata: # if it is not empty
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        for j in tmpdata:
            tmp.append("<td>%s</td>\n" % writeFEvals2(j / d, 1))
    res.append("".join(tmp))
    res.append("</tr>\n</tfoot>\n")

    # add data
    res.append("<tbody>\n")
    for i in range(len(EVALS)):
        # percentiles over all functions at budget EVALS[i]
        tmpdata = list(data[f][i] for f in data)
        #set_trace()
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        res.append("<tr>\n")
        # format entries
        #tmp = [writeFEvals(EVALS[i]/d, '.0')]
        if EVALS[i] / d < 200:
            tmp = writeFEvals2(EVALS[i] / d, 3)
        else:
            tmp = writeFEvals2(EVALS[i] / d, 1)
        tmp = [
            "<th sorttable_customkey=\"%f\">%s</th>\n" % ((EVALS[i] / d), tmp)
        ]
        for j in tmpdata:
            # tmp.append(writeFEvals(j, '.2'))
            # tmp.append(writeFEvals2(j, 2))
            if j == 0.:
                tmp1 = "0"
            elif j < 1:
                tmp1 = "%1.2f" % j
            elif j < 10:
                tmp1 = "%1.1f" % j
            elif j < 100:
                tmp1 = "%2.0f" % j
            else:
                ar = ("%1.1e" % j).split('e')
                tmp1 = ar[0] + 'e' + str(int(ar[1]))
            tmp.append("<td sorttable_customkey=\"%f\">%s</td>\n" % (j, tmp1))
        res.append("".join(tmp))
        res.append("</tr>\n")
    res.append("</tbody>\n")

    res = ("").join(res)
    function = "<p><b><i>f</i><sub>%d</sub>–<i>f</i><sub>%d</sub> in %d-D</b>, maxFE/D=%s</p>\n" % (
        min(funcs), max(funcs), d, writeFEvals2(int(mFE / d), maxdigits=6))
    res = function + "<table class=\"sortable\">\n" + res
    res = res + "</table>\n"

    # splice the table into the existing HTML page at the marker comment
    filename = os.path.join(
        outputdir, genericsettings.single_algorithm_file_name + '.html')
    lines = []
    with open(filename) as infile:
        for line in infile:
            if '<!--tables-->' in line:
                lines.append(res)
            lines.append(line)
    with open(filename, 'w') as outfile:
        for line in lines:
            outfile.write(line)
    if verbose:
        print "Wrote ERT loss ratio table in %s." % filename
def generateSingleTableTex(dsList, funcs, mFE, d, prcOfInterest, EVALS, data,
                           outputdir='.', info='default', verbose=True):
    """Generates single ERT loss ratio table.

    :param DataSetList dsList: input data set
    :param funcs: function ids; only min and max are used, for the caption
    :param mFE: overall maximum number of function evaluations (caption)
    :param d: problem dimension
    :param prcOfInterest: percentiles shown as table columns
    :param EVALS: budgets (numbers of evaluations) defining the table rows
    :param data: mapping function id -> per-budget values to summarize
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity
    """
    res = []
    # LaTeX caption line
    tmp = "\\textbf{\\textit{f}\\raisebox{-0.35ex}{%d}--\\textit{f}\\raisebox{-0.35ex}{%d} in %d-D}, maxFE/D=%s" \
          % (min(funcs), max(funcs), d, writeFEvals2(int(mFE/d), maxdigits=6))
    res.append(r" & \multicolumn{" + str(len(prcOfInterest)) + "}{|c}{" + tmp + "}")
    header = ["\\#FEs/D"]
    for i in prcOfInterest:
        if i == 0:
            tmp = "best"
        elif i == 50:
            tmp = "\\textbf{med}"
        else:
            tmp = "%d\\%%" % i
        header.append(tmp)
    #set_trace()
    res.append(" & ".join(header))
    for i in range(len(EVALS)):
        # percentiles over all functions at budget EVALS[i]
        tmpdata = list(data[f][i] for f in data)
        #set_trace()
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        # format entries
        #tmp = [writeFEvals(EVALS[i]/d, '.0')]
        if EVALS[i] / d < 200:
            tmp = [writeFEvals2(EVALS[i] / d, 3)]
        else:
            tmp = [writeFEvals2(EVALS[i] / d, 1)]
        for j in tmpdata:
            # tmp.append(writeFEvals(j, '.2'))
            # tmp.append(writeFEvals2(j, 2))
            if j == 0.:
                tmp.append("~\\,0")
            elif j < 1:
                tmp.append("~\\,%1.2f" % j)
            elif j < 10:
                tmp.append("\\hspace*{1ex}%1.1f" % j)
            elif j < 100:
                tmp.append("%2.0f" % j)
            else:
                ar = ("%1.1e" % j).split('e')
                tmp.append(ar[0] + 'e' + str(int(ar[1])))
            # print tmp[-1]
        res.append(" & ".join(tmp))

    # add last line: runlength distribution for which 1e-8 was not reached.
    tmp = [r"$\text{RL}_{\text{US}}$/D"]
    tmpdata = []
    for i in dsList:
        # scan rows from the end back to the first row at/below f_thresh
        # (f_thresh is a module-level name not visible in this block)
        it = reversed(i.evals)
        curline = None
        nextline = it.next()
        while nextline[0] <= f_thresh:
            curline = nextline[1:]
            nextline = it.next()
        if curline is None:
            tmpdata.extend(i.maxevals)  # no trial reached the threshold
        else:
            tmpdata.extend(i.maxevals[np.isnan(curline)])
    #set_trace()
    if tmpdata: # if it is not empty
        tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
        for j in tmpdata:
            tmp.append(writeFEvals2(j / d, 1))
        res.append(" & ".join(tmp))

    res = (r"\\" + "\n").join(res)
    res = r"\begin{tabular}{c|" + len(prcOfInterest) * "l" + "}\n" + res
    #res = r"\begin{tabular}{ccccc}" + "\n" + res
    res = res + "\n" + r"\end{tabular}" + "\n"
    filename = os.path.join(outputdir,
                            'pploglosstable_%02dD_%s.tex' % (d, info))
    f = open(filename, 'w')
    f.write(res)
    f.close()
    if verbose:
        print "Wrote ERT loss ratio table in %s." % filename
def generateTable(dsList, CrE=0., outputdir='.', info='default', verbose=True):
    """Generates ERT loss ratio tables.

    :param DataSetList dsList: input data set
    :param float CrE: crafting effort (see COCO documentation)
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity
    """
    #Set variables
    prcOfInterest = [0, 10, 25, 50, 75, 90]  # percentiles shown as columns
    for d, dsdim in dsList.dictByDim().iteritems():
        res = []
        maxevals = []
        funcs = []
        mFE = []
        for i in dsdim:
            # largest budget with a finite ERT for this data set
            maxevals.append(max(i.ert[numpy.isinf(i.ert) == False]))
            funcs.append(i.funcId)
            mFE.append(max(i.maxevals))
        maxevals = max(maxevals)
        mFE = max(mFE)
        # row budgets: 2*d, then powers of ten times d up to maxevals/d
        EVALS = [2. * d]
        EVALS.extend(
            numpy.power(
                10., numpy.arange(1, numpy.log10(1e-9 + maxevals * 1. / d))) *
            d)
        #Set variables: Done
        data = generateData(dsList, EVALS, CrE)
        # LaTeX caption line for this dimension's table
        tmp = "\\textbf{\\textit{f}\\raisebox{-0.35ex}{%d}--\\textit{f}\\raisebox{-0.35ex}{%d} in %d-D}, maxFE/D=%s" \
              % (min(funcs), max(funcs), d, writeFEvals2(int(mFE/d), maxdigits=6))
        res.append(r" & \multicolumn{" + str(len(prcOfInterest)) + "}{|c}{" +
                   tmp + "}")
        header = ["\\#FEs/D"]
        for i in prcOfInterest:
            if i == 0:
                tmp = "best"
            elif i == 50:
                tmp = "\\textbf{med}"
            else:
                tmp = "%d\\%%" % i
            header.append(tmp)
        #set_trace()
        res.append(" & ".join(header))
        for i in range(len(EVALS)):
            # percentiles over all functions at budget EVALS[i]
            tmpdata = list(data[f][i] for f in data)
            #set_trace()
            tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
            # format entries
            #tmp = [writeFEvals(EVALS[i]/d, '.0')]
            if EVALS[i] / d < 200:
                tmp = [writeFEvals2(EVALS[i] / d, 3)]
            else:
                tmp = [writeFEvals2(EVALS[i] / d, 1)]
            for j in tmpdata:
                # tmp.append(writeFEvals(j, '.2'))
                # tmp.append(writeFEvals2(j, 2))
                if j == 0.:
                    tmp.append("~\\,0")
                elif j < 1:
                    tmp.append("~\\,%1.2f" % j)
                elif j < 10:
                    tmp.append("\\hspace*{1ex}%1.1f" % j)
                elif j < 100:
                    tmp.append("%2.0f" % j)
                else:
                    ar = ("%1.1e" % j).split('e')
                    tmp.append(ar[0] + 'e' + str(int(ar[1])))
                # print tmp[-1]
            res.append(" & ".join(tmp))

        # add last line: runlength distribution for which 1e-8 was not reached.
        tmp = [r"$\text{RL}_{\text{US}}$/D"]
        tmpdata = []
        # NOTE(review): this iterates dsList (all dimensions), not dsdim --
        # confirm whether the footer should be restricted to dimension d.
        for i in dsList:
            # scan rows from the end back to the first row at/below f_thresh
            # (f_thresh is a module-level name not visible in this block)
            it = reversed(i.evals)
            curline = None
            nextline = it.next()
            while nextline[0] <= f_thresh:
                curline = nextline[1:]
                nextline = it.next()
            if curline is None:
                tmpdata.extend(i.maxevals)  # no trial reached the threshold
            else:
                tmpdata.extend(i.maxevals[numpy.isnan(curline)])
        #set_trace()
        if tmpdata:  # if it is not empty
            tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
            for j in tmpdata:
                tmp.append(writeFEvals2(j / d, 1))
            res.append(" & ".join(tmp))

        res = (r"\\" + "\n").join(res)
        res = r"\begin{tabular}{c|" + len(prcOfInterest) * "l" + "}\n" + res
        #res = r"\begin{tabular}{ccccc}" + "\n" + res
        res = res + "\n" + r"\end{tabular}" + "\n"
        filename = os.path.join(outputdir,
                                'pploglosstable_%02dD_%s.tex' % (d, info))
        f = open(filename, 'w')
        f.write(res)
        f.close()
        if verbose:
            print "Wrote ERT loss ratio table in %s." % filename
def plot(dsList, param='dim', targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)):
    """Generate plot of ERT vs param."""
    dictparam = dsList.dictByParam(param)
    params = sorted(dictparam)  # sorted because we draw lines

    # generate plot from dsList
    res = []
    # collect data: per param value, evaluation counts for each target
    rawdata = {}
    for p in params:
        assert len(dictparam[p]) == 1
        rawdata[p] = dictparam[p][0].detEvals(targets)
        # expect dictparam[p] to have only one element

    # plot lines for ERT
    xpltdata = params
    for i, t in enumerate(targets):
        ypltdata = []
        for p in params:
            data = rawdata[p][i]
            unsucc = np.isnan(data)
            assert len(dictparam[p]) == 1
            # unsuccessful trials are counted with their full budget
            data[unsucc] = dictparam[p][0].maxevals
            # compute ERT
            ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
            ypltdata.append(ert)
        res.extend(plt.plot(xpltdata, ypltdata, markersize=20,
                   zorder=len(targets) - i, **styles[i]))
        # for the legend
        plt.plot([], [], markersize=10,
                 label=' %+d' % (np.log10(targets[i])),
                 **styles[i])

    # plot median of successful runs for hardest target with a success
    for p in params:
        for i, t in enumerate(reversed(targets)):
            # targets has to be from hardest to easiest
            # NOTE(review): i indexes the reversed enumeration while
            # rawdata[p] is in the original target order -- confirm this
            # pairing (and the use of styles[i]) is intended.
            data = rawdata[p][i]
            data = data[np.isnan(data) == False]
            if len(data) > 0:
                median = toolsstats.prctile(data, 50.)[0]
                res.extend(plt.plot(p, median, styles[i]['color'], **medmarker))
                break

    # plot average number of function evaluations for the hardest target
    xpltdata = []
    ypltdata = []
    for p in params:
        data = rawdata[p][0]  # first target
        xpltdata.append(p)
        if (np.isnan(data) == False).all():
            tmpdata = data.copy()
            assert len(dictparam[p]) == 1
            tmpdata[np.isnan(data)] = dictparam[p][0].maxevals[np.isnan(data)]
            tmp = np.mean(tmpdata)
        else:
            tmp = np.nan  # Check what happens when plotting NaN
        ypltdata.append(tmp)
    res.extend(plt.plot(xpltdata, ypltdata, **avgstyle))

    # display numbers of successes for hardest target where there is still one success
    for p in params:
        for i, t in enumerate(targets):
            # targets has to be from hardest to easiest
            data = rawdata[p][i]
            unsucc = np.isnan(data)
            assert len(dictparam[p]) == 1
            data[unsucc] = dictparam[p][0].maxevals
            # compute ERT
            ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
            if srate == 1.:
                break
            elif succ > 0:
                # annotate the point with its number of successes
                res.append(plt.text(p, ert * 1.85, "%d" % succ,
                                    axes=plt.gca(),
                                    horizontalalignment="center",
                                    verticalalignment="bottom"))
                break
    return res