Example #1
    if 1 < 3:
        print ("Post-processing: will generate output " +
               "data in folder %s" % outputdir)
        print "  this might take several minutes."

        if not os.path.exists(outputdir):
            os.makedirs(outputdir)
            if verbose:
                print('Folder %s was created.' % outputdir)

        # prepend the algorithm name command to the tex-command file
        abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        lines = []
        for i, alg in enumerate(args):
            lines.append('\\providecommand{\\algorithm' + abc[i] + '}{' + 
                    str_to_latex(strip_pathname2(alg)) + '}')
        prepend_to_file(os.path.join(outputdir, 'bbob_pproc_commands.tex'), 
                     lines, 5000, 
                     'bbob_pproc_commands.tex truncated, consider removing the file before the next run'
                     )

        dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)

        if not dsList:
            sys.exit()

        for i in dictAlg:
            if isNoisy and not isNoiseFree:
                dictAlg[i] = dictAlg[i].dictByNoise().get('nzall', DataSetList())
            if isNoiseFree and not isNoisy:
                dictAlg[i] = dictAlg[i].dictByNoise().get('noiselessall', DataSetList())
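
The abc string above simply maps the i-th algorithm to a letter-suffixed
LaTeX macro. A minimal, self-contained sketch of that naming scheme;
str_to_latex and strip_pathname2 live in toolsdivers in the real code base
and are replaced here by a caller-supplied escape function:

import string

def algorithm_commands(algs, escape=lambda s: s):
    # One \providecommand per algorithm, keyed A..Z then a..z,
    # mirroring the abc lookup string in the example above.
    abc = string.ascii_uppercase + string.ascii_lowercase
    return ['\\providecommand{\\algorithm%s}{%s}' % (abc[i], escape(alg))
            for i, alg in enumerate(algs)]

print('\n'.join(algorithm_commands(['CMA-ES', 'NEWUOA'])))
# \providecommand{\algorithmA}{CMA-ES}
# \providecommand{\algorithmB}{NEWUOA}
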
Example #2
def main(dictAlg,
         order=None,
         outputdir='.',
         info='default',
         dimension=None,
         verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one instance per algorithm
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param int dimension: if not None, process only this dimension
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config
    global divide_by_dimension  # not fully implemented/tested yet
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1 and dimension is None:
        raise ValueError('We never integrate over dimension.')
    if dimension is not None:
        if dimension not in tmp.keys():
            raise ValueError('dimension %d not in dictAlg dimensions %s' %
                             (dimension, str(tmp.keys())))
        tmp = {dimension: tmp[dimension]}
    dim = tmp.keys()[0]
    divisor = dim if divide_by_dimension else 1

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0:
            print('Crafting effort for %s is %s' % (alg, CrE))

    dictData = {}  # list of (ert per function) per algorithm
    dictMaxEvals = {}  # list of (maxevals per function) per algorithm
    bestERT = []  # best ert per function
    # funcsolved = [set()] * len(targets) # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []
    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))
        for j, t in enumerate(target_values((f, dim))):
            # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)

            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0]  # one element per fun and per dim.
                    evals = entry.detEvals([t])[0]
                    assert entry.dim == dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc,
                                              runlengthunsucc,
                                              percentiles=[50],
                                              samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    #set_trace()
                    warntxt = (
                        'Data for algorithm %s on function %d in %d-D ' %
                        (alg, f, dim) + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        if displaybest2009:
            #set_trace()
            if not bestalg.bestalgentries2009:
                bestalg.loadBBOB2009()
            bestalgentry = bestalg.bestalgentries2009[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    #set_trace()
                    assert dim == bestalgentry.dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = bestalgentry.maxevals[
                        bestalgevals[1][j]][np.isnan(evals)] / divisor
                    x = toolsstats.drawSP(runlengthsucc,
                                          runlengthunsucc,
                                          percentiles=[50],
                                          samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)

    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {
            'ls': '-',
            'linewidth': 6,
            'marker': 'D',
            'markersize': 11.,
            'markeredgewidth': 1.5,
            'markerfacecolor': refcolor,
            'markeredgecolor': refcolor,
            'color': refcolor,
            'label': 'best 2009',
            'zorder': -1
        }
        lines.append(
            plotdata(np.array(xbest2009),
                     x_limit,
                     maxevalsbest2009,
                     CrE=0.,
                     **args))

    def algname_to_label(algname, dirname=None):
        """to be extended to become generally useful"""
        if isinstance(algname, (tuple, list)):  # not sure this is needed
            return ' '.join([str(name) for name in algname])
        return str(algname)

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[i % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = algname_to_label(alg)
        #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib
        #elif len(show_algorithms) > 0:
        #args['color'] = 'wheat'
        #args['ls'] = '-'
        #args['zorder'] = -1
        # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... which does the work
        lines.append(
            plotdata(np.array(data),
                     x_limit,
                     maxevals,
                     CrE=CrEperAlg[alg],
                     **args))

    labels, handles = plotLegend(lines, x_limit)
    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}  # latex commands
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(
                    r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' %
                    (tmp,
                     toolsdivers.str_to_latex(
                         toolsdivers.strip_pathname2(algname_to_label(alg)))))
                algtocommand[algname_to_label(alg)] = tmp
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            commandnames = []
            for label in labels:
                commandnames.append(algtocommand[label])
            # f.write(headleg)
            if len(order) > 28:  # latex sidepanel won't work well for more than 25 algorithms, but original labels are also clipped
                f.write(
                    r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}'
                    % (commandnames[0], commandnames[-1]))
            else:
                fontsize_command = r'\tiny{}' if len(order) > 19 else ''
                f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}' %
                        (fontsize_command,
                         commandnames[0]))  # TODO: check len(labels) > 0
                for i in range(1, len(labels)):
                    f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
                f.write('}}\n')
            # f.write(footleg)
            if verbose:
                print('Wrote right-hand legend in %s' % fileName)

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys())))
    text += ',%d-D' % dim  # TODO: this is strange when different dimensions are plotted
    plt.text(0.01,
             0.98,
             text,
             horizontalalignment="left",
             verticalalignment="top",
             transform=plt.gca().transAxes)
    if len(dictFunc) == 1:
        plt.title(' '.join(
            (str(dictFunc.keys()[0]),
             genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))
    a = plt.gca()

    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    if save_figure:
        ppfig.saveFigure(figureName, verbose=verbose)
        if len(dictFunc) == 1:
            ppfig.save_single_functions_html(
                os.path.join(outputdir, 'pprldmany'),
                '',  # algorithms names are clearly visible in the figure
                add_to_names='_%02dD' % (dim),
                algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED)
    if close_figure:
        plt.close()
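
The statistic behind toolsstats.drawSP above is the simulated-restart run
length: runs are drawn with replacement, each unsuccessful run contributes
its full budget, and the sequence stops at the first successful run. A
self-contained sketch of that idea; the real drawSP also returns the
requested percentiles and handles further edge cases:

import numpy as np

def simulated_restart_runlengths(runlengths_succ, runlengths_unsucc,
                                 samplesize=100, rng=np.random):
    # Assumes at least one successful run, as guarded by the
    # `len(runlengthsucc) > 0` check in the example above.
    runs = list(runlengths_succ) + list(runlengths_unsucc)
    n_succ = len(runlengths_succ)
    out = []
    for _ in range(samplesize):
        total = 0.0
        while True:
            k = rng.randint(len(runs))  # draw one run uniformly
            total += runs[k]
            if k < n_succ:  # a successful run ends the restart sequence
                break
        out.append(total)
    return np.array(out)
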
Example #3
    if 1 < 3:
        print("Post-processing: will generate output " +
              "data in folder %s" % outputdir)
        print "  this might take several minutes."

        if not os.path.exists(outputdir):
            os.makedirs(outputdir)
            if verbose:
                print('Folder %s was created.' % outputdir)

        # prepend the algorithm name command to the tex-command file
        abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        lines = []
        for i, alg in enumerate(args):
            lines.append('\\providecommand{\\algorithm' + abc[i] + '}{' +
                         str_to_latex(strip_pathname2(alg)) + '}')
        prepend_to_file(
            os.path.join(outputdir, 'bbob_pproc_commands.tex'), lines, 5000,
            'bbob_pproc_commands.tex truncated, consider removing the file before the next run'
        )

        dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)

        if not dsList:
            sys.exit()

        for i in dictAlg:
            if isNoisy and not isNoiseFree:
                dictAlg[i] = dictAlg[i].dictByNoise().get(
                    'nzall', DataSetList())
            if isNoiseFree and not isNoisy:
                dictAlg[i] = dictAlg[i].dictByNoise().get(
                    'noiselessall', DataSetList())
Example #4
def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default',
         dimension=None, verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one instance per algorithm
    :param bool isBiobjective: if True, the "best 2009" reference data
                               are not displayed
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param int dimension: if not None, process only this dimension
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config 
    global divide_by_dimension  # not fully implemented/tested yet
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1 and dimension is None:
        raise ValueError('We never integrate over dimension.')
    if dimension is not None:
        if dimension not in tmp.keys():
            raise ValueError('dimension %d not in dictAlg dimensions %s'
                             % (dimension, str(tmp.keys())))
        tmp = {dimension: tmp[dimension]}
    dim = tmp.keys()[0]
    divisor = dim if divide_by_dimension else 1

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0: 
            print('Crafting effort for %s is %s' % (alg, CrE))

    dictData = {} # list of (ert per function) per algorithm
    dictMaxEvals = {} # list of (maxevals per function) per algorithm
    bestERT = [] # best ert per function
    # funcsolved = [set()] * len(targets) # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []
    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))
        for j, t in enumerate(target_values((f, dim))):
        # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)

            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0] # one element per fun and per dim.
                    evals = entry.detEvals([t])[0]
                    assert entry.dim == dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                             percentiles=[50],
                                             samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    #set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                           % (alg, f, dim)
                           + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        displaybest2009 = not isBiobjective  # disabled until we find the bug
        if displaybest2009:
            #set_trace()
            bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)
            bestalgentry = bestalgentries[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    #set_trace()
                    assert dim == bestalgentry.dim
                    runlengthsucc = evals[np.isnan(evals) == False] / divisor
                    runlengthunsucc = bestalgentry.maxevals[bestalgevals[1][j]][np.isnan(evals)] / divisor
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                         percentiles=[50],
                                         samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)
                
    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    def algname_to_label(algname, dirname=None):
        """to be extended to become generally useful"""
        if isinstance(algname, (tuple, list)): # not sure this is needed
            return ' '.join([str(name) for name in algname])
        return str(algname)
    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[i % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = algname_to_label(alg)
        #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib
        #elif len(show_algorithms) > 0:
            #args['color'] = 'wheat'
            #args['ls'] = '-'
            #args['zorder'] = -1
        # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... which does the work
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)
    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}  # latex commands
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' %
                        (tmp, toolsdivers.str_to_latex(
                                toolsdivers.strip_pathname2(algname_to_label(alg)))))
                algtocommand[algname_to_label(alg)] = tmp
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            commandnames = []
            for label in labels:
                commandnames.append(algtocommand[label])
            # f.write(headleg)
            if len(order) > 28:  # latex sidepanel won't work well for more than 25 algorithms, but original labels are also clipped
                f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}'
                        % (commandnames[0], commandnames[-1]))
            else:
                fontsize_command = r'\tiny{}' if len(order) > 19 else ''
                f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}' %
                        (fontsize_command, commandnames[0])) # TODO: check len(labels) > 0
                for i in range(1, len(labels)):
                    f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
                f.write('}}\n')
            # f.write(footleg)
            if verbose:
                print('Wrote right-hand legend in %s' % fileName)

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f')
    text += ',%d-D' % dim  # TODO: this is strange when different dimensions are plotted
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)
    if len(dictFunc) == 1:
        plt.title(' '.join((str(dictFunc.keys()[0]),
                  genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))
    a = plt.gca()

    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)

    if save_figure:
        ppfig.saveFigure(figureName, verbose=verbose)
        if len(dictFunc) == 1:
            ppfig.save_single_functions_html(
                os.path.join(outputdir, 'pprldmany'),
                '', # algorithms names are clearly visible in the figure
                add_to_names='_%02dD' % (dim),
                algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED
            )
    if close_figure:
        plt.close()
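
Both variants above plot bootstrapped distributions of ERT, the expected
running time. For reference, a sketch of the ERT definition that detERT
implements in the COCO code base; the helper name here is illustrative:

import numpy as np

def expected_running_time(evals, maxevals):
    # ERT = (evaluations summed over all runs) / (number of successes).
    # Runs that never reached the target (NaN in evals) contribute their
    # full budget from maxevals; with no success at all, ERT is infinite.
    evals = np.asarray(evals, dtype=float)
    maxevals = np.asarray(maxevals, dtype=float)
    succ = ~np.isnan(evals)
    if not succ.any():
        return np.inf
    return (evals[succ].sum() + maxevals[~succ].sum()) / succ.sum()
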
Example #5
def main(dictAlg, sortedAlgs, outputdir='.', verbose=True, function_targets_line=True):  # [1, 13, 101]
    """Generate one table per func with results of multiple algorithms."""
    """Difference with the first version:

    * numbers aligned using the decimal separator
    * premices for dispersion measure
    * significance test against best algorithm
    * table width...

    Takes ``targetsOfInterest`` from this file as "input argument" to compute
    the desired target values. ``targetsOfInterest`` might be configured via 
    config.
    
    """

    # TODO: method is long, terrible to read, split if possible

    if not bestalg.bestalgentries2009:
        bestalg.loadBBOB2009()

    # Sort data per dimension and function
    dictData = {}
    dsListperAlg = list(dictAlg[i] for i in sortedAlgs)
    for n, entries in enumerate(dsListperAlg):
        tmpdictdim = entries.dictByDim()
        for d in tmpdictdim:
            tmpdictfun = tmpdictdim[d].dictByFunc()
            for f in tmpdictfun:
                dictData.setdefault((d, f), {})[n] = tmpdictfun[f]

    nbtests = len(dictData)

    for df in dictData:
        # Generate one table per df
        # first update targets for each dimension-function pair if needed:
        targets = targetsOfInterest((df[1], df[0]))            
        targetf = targets[-1]
        
        # best 2009
        refalgentry = bestalg.bestalgentries2009[df]
        refalgert = refalgentry.detERT(targets)
        refalgevals = (refalgentry.detEvals((targetf, ))[0][0])
        refalgnbruns = len(refalgevals)
        refalgnbsucc = numpy.sum(numpy.isnan(refalgevals) == False)

        # Process the data
        # The following variables will be lists of elements each corresponding
        # to an algorithm
        algnames = []
        #algdata = []
        algerts = []
        algevals = []
        algdisp = []
        algnbsucc = []
        algnbruns = []
        algmedmaxevals = []
        algmedfinalfunvals = []
        algtestres = []
        algentries = []

        for n in sorted(dictData[df].keys()):
            entries = dictData[df][n]
            # the number of datasets for a given dimension and function (df)
            # should be strictly 1. TODO: find a way to warn
            # TODO: do this checking before... why wasn't it triggered by ppperprof?
            if len(entries) > 1:
                txt = ("There is more than a single entry associated with "
                       "folder %s on %d-D f%d." % (sortedAlgs[n], df[0], df[1]))
                raise Exception(txt)

            entry = entries[0]
            algentries.append(entry)

            algnames.append(sortedAlgs[n])

            evals = entry.detEvals(targets)
            #tmpdata = []
            tmpdisp = []
            tmpert = []
            for i, e in enumerate(evals):
                succ = (numpy.isnan(e) == False)
                ec = e.copy() # note: here was the previous bug (changes made in e also appeared in evals !)
                ec[succ == False] = entry.maxevals[succ == False]
                ert = toolsstats.sp(ec, issuccessful=succ)[0]
                #tmpdata.append(ert/refalgert[i])
                if succ.any():
                    tmp = toolsstats.drawSP(ec[succ], entry.maxevals[succ == False],
                                           [10, 50, 90], samplesize=samplesize)[0]
                    tmpdisp.append((tmp[-1] - tmp[0])/2.)
                else:
                    tmpdisp.append(numpy.nan)
                tmpert.append(ert)
            algerts.append(tmpert)
            algevals.append(evals)
            #algdata.append(tmpdata)
            algdisp.append(tmpdisp)
            algmedmaxevals.append(numpy.median(entry.maxevals))
            algmedfinalfunvals.append(numpy.median(entry.finalfunvals))
            #algmedmaxevals.append(numpy.median(entry.maxevals)/df[0])
            #algmedfinalfunvals.append(numpy.median(entry.finalfunvals))

            algtestres.append(significancetest(refalgentry, entry, targets))

            # determine success probability for Df = 1e-8
            e = entry.detEvals((targetf ,))[0]
            algnbsucc.append(numpy.sum(numpy.isnan(e) == False))
            algnbruns.append(len(e))

        # Process over all data
        # find best values...
            
        nalgs = len(dictData[df])
        maxRank = 1 + numpy.floor(0.14 * nalgs)  # number of algs to be displayed in bold

        isBoldArray = [] # Point out the best values
        algfinaldata = [] # Store median function values/median number of function evaluations
        tmptop = getTopIndicesOfColumns(algerts, maxRank=maxRank)
        for i, erts in enumerate(algerts):
            tmp = []
            for j, ert in enumerate(erts):  # algi targetj
                tmp.append(i in tmptop[j] or (nalgs > 7 and algerts[i][j] <= 3. * refalgert[j]))
            isBoldArray.append(tmp)
            algfinaldata.append((algmedfinalfunvals[i], algmedmaxevals[i]))

        # significance test of best given algorithm against all others
        best_alg_idx = numpy.array(algerts).argsort(0)[0, :]  # indexed by target index
        significance_versus_others = significance_all_best_vs_other(algentries, targets, best_alg_idx)[0]
                
        # Create the table
        table = []
        spec = r'@{}c@{}|*{%d}{@{\,}r@{}X@{\,}}|@{}r@{}@{}l@{}' % (len(targets)) # in case StrLeft not working: replaced c@{} with l@{ }
        spec = r'@{}c@{}|*{%d}{@{}r@{}X@{}}|@{}r@{}@{}l@{}' % (len(targets)) # in case StrLeft not working: replaced c@{} with l@{ }
        extraeol = []

        # Generate header lines
        if with_table_heading:
            header = funInfos[df[1]] if funInfos else 'f%d' % df[1]
            table.append([r'\multicolumn{%d}{@{\,}c@{\,}}{{\textbf{%s}}}'
                          % (2 * len(targets) + 2, header)])
            extraeol.append('')

        if function_targets_line is True or (function_targets_line and df[1] in function_targets_line):
            if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
                curline = [r'\#FEs/D']
                for i in targetsOfInterest.labels():
                    curline.append(r'\multicolumn{2}{@{}c@{}}{%s}' % i)
            else:
                curline = [r'$\Delta f_\mathrm{opt}$']
                for t in targets:
                    curline.append(r'\multicolumn{2}{@{\,}X@{\,}}{%s}'
                                % writeFEvals2(t, precision=1, isscientific=True))
#                curline.append(r'\multicolumn{2}{@{\,}X@{}|}{%s}'
#                            % writeFEvals2(targets[-1], precision=1, isscientific=True))
            curline.append(r'\multicolumn{2}{@{}l@{}}{\#succ}')
            table.append(curline)
        extraeol.append(r'\hline')
#        extraeol.append(r'\hline\arrayrulecolor{tableShade}')

        curline = [r'ERT$_{\text{best}}$'] if with_table_heading else [r'\textbf{f%d}' % df[1]] 
        if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
            # write ftarget:fevals
            for i in xrange(len(refalgert[:-1])):
                temp = "%.1e" % targetsOfInterest((df[1], df[0]))[i]
                if temp[-2] == "0":
                    temp = temp[:-2] + temp[-1]
                curline.append(r'\multicolumn{2}{@{}c@{}}{\textit{%s}:%s \quad}'
                               % (temp, writeFEvalsMaxPrec(refalgert[i], 2)))
            temp = "%.1e" % targetsOfInterest((df[1], df[0]))[-1]
            if temp[-2] == "0":
                temp = temp[:-2] + temp[-1]
            curline.append(r'\multicolumn{2}{@{}c@{}|}{\textit{%s}:%s }'
                           % (temp, writeFEvalsMaxPrec(refalgert[-1], 2)))
        else:            
            # write #fevals of the reference alg
            for i in refalgert[:-1]:
                curline.append(r'\multicolumn{2}{@{}c@{}}{%s \quad}'
                                   % writeFEvalsMaxPrec(i, 2))
            curline.append(r'\multicolumn{2}{@{}c@{}|}{%s}'
                               % writeFEvalsMaxPrec(refalgert[-1], 2))
    
        # write the success ratio for the reference alg
        tmp2 = numpy.sum(numpy.isnan(refalgevals) == False) # count the nb of success
        curline.append('%d' % (tmp2))
        if tmp2 > 0:
            curline.append('/%d' % len(refalgevals))

        table.append(curline[:])
        extraeol.append('')

        #for i, gna in enumerate(zip((1, 2, 3), ('bla', 'blo', 'bli'))):
            #print i, gna, gno
            #set_trace()
        # Format data
        #if df == (5, 17):
            #set_trace()

        header = r'\providecommand{\ntables}{7}'
        for i, alg in enumerate(algnames):
            #algname, entries, irs, line, line2, succ, runs, testres1alg in zip(algnames,
            #data, dispersion, isBoldArray, isItalArray, nbsucc, nbruns, testres):
            commandname = r'\alg%stables' % numtotext(i)
#            header += r'\providecommand{%s}{{%s}{}}' % (commandname, str_to_latex(strip_pathname(alg)))
            header += r'\providecommand{%s}{\StrLeft{%s}{\ntables}}' % (commandname, str_to_latex(strip_pathname2(alg)))
            curline = [commandname + r'\hspace*{\fill}']  # each list element becomes a &-separated table entry?

            for j, tmp in enumerate(zip(algerts[i], algdisp[i],  # j is target index
                                        isBoldArray[i], algtestres[i])):
                ert, dispersion, isBold, testres = tmp
                alignment = r'@{\,}X@{\,}'
                if j == len(algerts[i]) - 1:
                    alignment = r'@{\,}X@{\,}|'

                data = ert/refalgert[j]
                # write star for significance against all other algorithms
                str_significance_subsup = ''
                if (len(best_alg_idx) > 0 and len(significance_versus_others) > 0 and 
                    i == best_alg_idx[j] and nbtests * significance_versus_others[j][1] < 0.05):
                    logp = -numpy.ceil(numpy.log10(nbtests * significance_versus_others[j][1]))
                    str_significance_subsup = r"^{%s%s}" % (significance_vs_others_symbol,
                                                            str(int(logp)) if logp > 1 else '')

                # moved out of the above else: this was a bug!?
                z, p = testres
                if (nbtests * p) < 0.05 and data < 1. and z < 0.: 
                    if not numpy.isinf(refalgert[j]):
                        tmpevals = algevals[i][j].copy()
                        tmpevals[numpy.isnan(tmpevals)] = algentries[i].maxevals[numpy.isnan(tmpevals)]
                        bestevals = refalgentry.detEvals(targets)
                        bestevals, bestalgalg = (bestevals[0][0], bestevals[1][0])
                        bestevals[numpy.isnan(bestevals)] = refalgentry.maxevals[bestalgalg][numpy.isnan(bestevals)]
                        tmpevals = numpy.array(sorted(tmpevals))[0:min(len(tmpevals), len(bestevals))]
                        bestevals = numpy.array(sorted(bestevals))[0:min(len(tmpevals), len(bestevals))]

                    #The conditions are now that ERT < ERT_best and
                    # all(sorted(FEvals_best) > sorted(FEvals_current)).
                    if numpy.isinf(refalgert[j]) or all(tmpevals < bestevals):
                        nbstars = -numpy.ceil(numpy.log10(nbtests * p))
                        # tmp2[-1] += r'$^{%s}$' % superscript
                        str_significance_subsup += r'_{%s%s}' % (significance_vs_ref_symbol, 
                                                                 str(int(nbstars)) if nbstars > 1 else '')
                if str_significance_subsup:
                    str_significance_subsup = '$%s$' % str_significance_subsup

                # format number in variable data
                if numpy.isnan(data):
                    curline.append(r'\multicolumn{2}{%s}{.}' % alignment)
                else:
                    if numpy.isinf(refalgert[j]):
                        curline.append(r'\multicolumn{2}{%s}{\textbf{%s}\mbox{\tiny (%s)}%s}'
                                       % (alignment,
                                          writeFEvalsMaxPrec(algerts[i][j], 2),
                                          writeFEvalsMaxPrec(dispersion, precdispersion), 
                                          str_significance_subsup))
                        continue

                    tmp = writeFEvalsMaxPrec(data, precfloat, maxfloatrepr=maxfloatrepr)
                    if data >= maxfloatrepr or data < 0.01: # either inf or scientific notation
                        if numpy.isinf(data) and j == len(algerts[i]) - 1:
                            tmp += r'\,\textit{%s}' % writeFEvalsMaxPrec(algfinaldata[i][1], 0, maxfloatrepr=maxfloatrepr)
                        else:
                            tmp = writeFEvalsMaxPrec(data, precscien, maxfloatrepr=data)
                            if isBold:
                                tmp = r'\textbf{%s}' % tmp

                        if not numpy.isnan(dispersion):
                            tmpdisp = dispersion/refalgert[j]
                            if tmpdisp >= maxfloatrepr or tmpdisp < 0.005: # TODO: hack
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=tmpdisp)
                            else:
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=maxfloatrepr)
                            tmp += r'\mbox{\tiny (%s)}' % tmpdisp
                        curline.append(r'\multicolumn{2}{%s}{%s%s}' % (alignment, tmp, str_significance_subsup))
                    else:
                        tmp2 = tmp.split('.', 1)
                        if len(tmp2) < 2:
                            tmp2.append('')
                        else:
                            tmp2[-1] = '.' + tmp2[-1]
                        if isBold:
                            tmp3 = []
                            for k in tmp2:
                                tmp3.append(r'\textbf{%s}' % k)
                            tmp2 = tmp3
                        if not numpy.isnan(dispersion):
                            tmpdisp = dispersion/refalgert[j]
                            if tmpdisp >= maxfloatrepr or tmpdisp < 0.01:
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=tmpdisp)
                            else:
                                tmpdisp = writeFEvalsMaxPrec(tmpdisp, precdispersion, maxfloatrepr=maxfloatrepr)
                            tmp2[-1] += (r'\mbox{\tiny (%s)}' % (tmpdisp))
                        tmp2[-1] += str_significance_subsup
                        curline.extend(tmp2)
                                        
            curline.append('%d' % algnbsucc[i])
            curline.append('/%d' % algnbruns[i])
            table.append(curline)
            extraeol.append('')

        # Write table
        res = tableXLaTeX(table, spec=spec, extraeol=extraeol)
        filename = os.path.join(outputdir, 'pptables_f%03d_%02dD.tex' % (df[1], df[0]))
        with open(filename, 'w') as f:
            f.write(header + '\n')
            f.write(res)
        if verbose:
            print('Wrote table in %s' % filename)
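
The significance marks in the table follow a Bonferroni-style rule: each
p-value is multiplied by nbtests, the number of (dimension, function) pairs,
and the star count is the negative decimal log of the corrected value. A
sketch of just that rule, with a hypothetical helper name:

import numpy as np

def significance_stars(p_value, n_tests, alpha=0.05):
    # Mirrors the `nbtests * p < 0.05` checks above: returns 0 when the
    # Bonferroni-corrected p-value is not significant, otherwise
    # -ceil(log10(n_tests * p)), which is at least 1.
    p_corr = n_tests * p_value
    if p_corr >= alpha:
        return 0
    return int(-np.ceil(np.log10(p_corr)))
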
Example #6
def main(dictAlg, order=None, outputdir='.', info='default',
         verbose=True):
    """Generates a figure showing the performance of algorithms.

    From a dictionary of :py:class:`DataSetList` sorted by algorithms,
    generates the cumulative distribution function of the bootstrap
    distribution of ERT for algorithms on multiple functions for
    multiple targets altogether.

    :param dict dictAlg: dictionary of :py:class:`DataSetList` instances,
                         one instance per algorithm
    :param list order: sorted list of keys to dictAlg for plotting order
    :param str outputdir: output directory
    :param str info: output file name suffix
    :param bool verbose: controls verbosity

    """
    global x_limit  # late assignment of default, because it can be set to None in config 
    if 'x_limit' not in globals() or x_limit is None:
        x_limit = x_limit_default

    tmp = pp.dictAlgByDim(dictAlg)
    # tmp = pp.DictAlg(dictAlg).by_dim()

    if len(tmp) != 1:
        raise Exception('We never integrate over dimension.')
    dim = tmp.keys()[0]

    algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []]

    dictFunc = pp.dictAlgByFun(dictAlg)

    # Collect data
    # Crafting effort correction: should we consider any?
    CrEperAlg = {}
    for alg in algorithms_with_data:
        CrE = 0.
        if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL':
            tmp = dictAlg[alg].dictByNoise()
            assert len(tmp.keys()) == 1
            if tmp.keys()[0] == 'noiselessall':
                CrE = 0.5117
            elif tmp.keys()[0] == 'nzall':
                CrE = 0.6572
        CrEperAlg[alg] = CrE
        if CrE != 0.0: 
            print('Crafting effort for %s is %s' % (alg, CrE))

    dictData = {} # list of (ert per function) per algorithm
    dictMaxEvals = {} # list of (maxevals per function) per algorithm
    bestERT = [] # best ert per function
    # funcsolved = [set()] * len(targets) # number of functions solved per target
    xbest2009 = []
    maxevalsbest2009 = []

    for f, dictAlgperFunc in dictFunc.iteritems():
        if function_IDs and f not in function_IDs:
            continue
        # print target_values((f, dim))
        for j, t in enumerate(target_values((f, dim))):
        # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
            # funcsolved[j].add(f)

            for alg in algorithms_with_data:
                x = [np.inf] * perfprofsamplesize
                runlengthunsucc = []
                try:
                    entry = dictAlgperFunc[alg][0] # one element per fun and per dim.
                    evals = entry.detEvals([t])[0]
                    runlengthsucc = evals[np.isnan(evals) == False] / entry.dim
                    runlengthunsucc = entry.maxevals[np.isnan(evals)] / entry.dim
                    if len(runlengthsucc) > 0:
                        x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                             percentiles=[50],
                                             samplesize=perfprofsamplesize)[1]
                except (KeyError, IndexError):
                    #set_trace()
                    warntxt = ('Data for algorithm %s on function %d in %d-D '
                           % (alg, f, dim)
                           + 'are missing.\n')
                    warnings.warn(warntxt)

                dictData.setdefault(alg, []).extend(x)
                dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc)

        if displaybest2009:
            #set_trace()
            if not bestalg.bestalgentries2009:
                bestalg.loadBBOB2009()
            bestalgentry = bestalg.bestalgentries2009[(dim, f)]
            bestalgevals = bestalgentry.detEvals(target_values((f, dim)))
            # print bestalgevals
            for j in range(len(bestalgevals[0])):
                if bestalgevals[1][j]:
                    evals = bestalgevals[0][j]
                    #set_trace()
                    runlengthsucc = evals[np.isnan(evals) == False] / bestalgentry.dim
                    runlengthunsucc = bestalgentry.maxevals[bestalgevals[1][j]][np.isnan(evals)] / bestalgentry.dim
                    x = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
                                         percentiles=[50],
                                         samplesize=perfprofsamplesize)[1]
                else:
                    x = perfprofsamplesize * [np.inf]
                    runlengthunsucc = []
                xbest2009.extend(x)
                maxevalsbest2009.extend(runlengthunsucc)
                
    if order is None:
        order = dictData.keys()

    # Display data
    lines = []
    if displaybest2009:
        args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11.,
                'markeredgewidth': 1.5, 'markerfacecolor': refcolor,
                'markeredgecolor': refcolor, 'color': refcolor,
                'label': 'best 2009', 'zorder': -1}
        lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009,
                              CrE=0., **args))

    for i, alg in enumerate(order):
        try:
            data = dictData[alg]
            maxevals = dictMaxEvals[alg]
        except KeyError:
            continue

        args = styles[i % len(styles)]
        args['linewidth'] = 1.5
        args['markersize'] = 12.
        args['markeredgewidth'] = 1.5
        args['markerfacecolor'] = 'None'
        args['markeredgecolor'] = args['color']
        args['label'] = alg
        #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib
        #elif len(show_algorithms) > 0:
            #args['color'] = 'wheat'
            #args['ls'] = '-'
            #args['zorder'] = -1
        lines.append(plotdata(np.array(data), x_limit, maxevals,
                              CrE=CrEperAlg[alg], **args))

    labels, handles = plotLegend(lines, x_limit)
    if True:  # isLateXLeg:
        fileName = os.path.join(outputdir, 'pprldmany_%s.tex' % (info))
        with open(fileName, 'w') as f:
            f.write(r'\providecommand{\nperfprof}{7}')
            algtocommand = {}
            for i, alg in enumerate(order):
                tmp = r'\alg%sperfprof' % pptex.numtotext(i)
                f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' %
                        (tmp, toolsdivers.str_to_latex(toolsdivers.strip_pathname2(alg))))
                algtocommand[alg] = tmp
            commandnames = []
            if displaybest2009:
                tmp = r'\algzeroperfprof'
                f.write(r'\providecommand{%s}{best 2009}' % (tmp))
                algtocommand['best 2009'] = tmp

            for l in labels:
                commandnames.append(algtocommand[l])
            # f.write(headleg)
            f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}' %
                    commandnames[0])  # TODO: check len(labels) > 0
            for i in range(1, len(labels)):
                f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i])
            f.write('}\n')
            # f.write(footleg)
        if verbose:
            print('Wrote right-hand legend in %s' % fileName)

    figureName = os.path.join(outputdir, 'pprldmany_%s' % (info))
    #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat)
    beautify()

    text = 'f%s' % (ppfig.consecutiveNumbers(sorted(dictFunc.keys())))
    text += ',%d-D' % dim
    plt.text(0.01, 0.98, text, horizontalalignment="left",
             verticalalignment="top", transform=plt.gca().transAxes)

    a = plt.gca()

    plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
    xticks, labels = plt.xticks()
    tmp = []
    for i in xticks:
        tmp.append('%d' % round(np.log10(i)))
    a.set_xticklabels(tmp)
    ppfig.saveFigure(figureName, verbose=verbose)

    plt.close()
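
All three figure variants end by renaming the x-ticks to their base-10
exponents. A compact sketch of that idiom, assuming positive ticks as on
the log-scaled runlength axis above:

import numpy as np
import matplotlib.pyplot as plt

def relabel_xticks_with_log10(ax=None):
    # After relabeling, an axis with ticks at 1, 10, 100 reads 0, 1, 2,
    # matching the loop over plt.xticks() in the examples.
    if ax is None:
        ax = plt.gca()
    ticks = [t for t in ax.get_xticks() if t > 0]
    ax.set_xticks(ticks)
    ax.set_xticklabels(['%d' % round(np.log10(t)) for t in ticks])
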
Example #7
def main(dictAlg,
         sortedAlgs,
         outputdir='.',
         verbose=True,
         function_targets_line=True):  # [1, 13, 101]
    """Generate one table per func with results of multiple algorithms."""
    """Difference with the first version:

    * numbers aligned using the decimal separator
    * premices for dispersion measure
    * significance test against best algorithm
    * table width...

    Takes ``targetsOfInterest`` from this file as "input argument" to compute
    the desired target values. ``targetsOfInterest`` might be configured via 
    config.
    
    """

    # TODO: method is long, terrible to read, split if possible

    if not bestalg.bestalgentries2009:
        bestalg.loadBBOB2009()

    # Sort data per dimension and function
    dictData = {}
    dsListperAlg = list(dictAlg[i] for i in sortedAlgs)
    for n, entries in enumerate(dsListperAlg):
        tmpdictdim = entries.dictByDim()
        for d in tmpdictdim:
            tmpdictfun = tmpdictdim[d].dictByFunc()
            for f in tmpdictfun:
                dictData.setdefault((d, f), {})[n] = tmpdictfun[f]

    nbtests = len(dictData)

    for df in dictData:
        # Generate one table per df
        # first update targets for each dimension-function pair if needed:
        targets = targetsOfInterest((df[1], df[0]))
        targetf = targets[-1]

        # best 2009
        refalgentry = bestalg.bestalgentries2009[df]
        refalgert = refalgentry.detERT(targets)
        refalgevals = (refalgentry.detEvals((targetf, ))[0][0])
        refalgnbruns = len(refalgevals)
        refalgnbsucc = numpy.sum(numpy.isnan(refalgevals) == False)

        # Process the data
        # The following variables will be lists of elements each corresponding
        # to an algorithm
        algnames = []
        #algdata = []
        algerts = []
        algevals = []
        algdisp = []
        algnbsucc = []
        algnbruns = []
        algmedmaxevals = []
        algmedfinalfunvals = []
        algtestres = []
        algentries = []

        for n in sorted(dictData[df].keys()):
            entries = dictData[df][n]
            # the number of datasets for a given dimension and function (df)
            # should be strictly 1. TODO: find a way to warn
            # TODO: do this checking before... why wasn't it triggered by ppperprof?
            if len(entries) > 1:
                txt = ("There is more than a single entry associated with "
                       "folder %s on %d-D f%d." %
                       (sortedAlgs[n], df[0], df[1]))
                raise Exception(txt)

            entry = entries[0]
            algentries.append(entry)

            algnames.append(sortedAlgs[n])

            evals = entry.detEvals(targets)
            #tmpdata = []
            tmpdisp = []
            tmpert = []
            for i, e in enumerate(evals):
                succ = (numpy.isnan(e) == False)
                # note: here was the previous bug (changes made in e also appeared in evals!)
                ec = e.copy()
                ec[succ == False] = entry.maxevals[succ == False]
                ert = toolsstats.sp(ec, issuccessful=succ)[0]
                #tmpdata.append(ert/refalgert[i])
                if succ.any():
                    tmp = toolsstats.drawSP(ec[succ],
                                            entry.maxevals[succ == False],
                                            [10, 50, 90],
                                            samplesize=samplesize)[0]
                    tmpdisp.append((tmp[-1] - tmp[0]) / 2.)
                else:
                    tmpdisp.append(numpy.nan)
                tmpert.append(ert)
            algerts.append(tmpert)
            algevals.append(evals)
            #algdata.append(tmpdata)
            algdisp.append(tmpdisp)
            algmedmaxevals.append(numpy.median(entry.maxevals))
            algmedfinalfunvals.append(numpy.median(entry.finalfunvals))
            #algmedmaxevals.append(numpy.median(entry.maxevals)/df[0])
            #algmedfinalfunvals.append(numpy.median(entry.finalfunvals))

            algtestres.append(significancetest(refalgentry, entry, targets))

            # determine success probability for Df = 1e-8
            e = entry.detEvals((targetf, ))[0]
            algnbsucc.append(numpy.sum(numpy.isnan(e) == False))
            algnbruns.append(len(e))

        # Process over all data
        # find best values...

        nalgs = len(dictData[df])
        maxRank = 1 + numpy.floor(0.14 * nalgs)  # number of algs to be displayed in bold

        isBoldArray = []  # Point out the best values
        algfinaldata = []  # Store median function values/median number of function evaluations
        tmptop = getTopIndicesOfColumns(algerts, maxRank=maxRank)
        for i, erts in enumerate(algerts):
            tmp = []
            for j, ert in enumerate(erts):  # algi targetj
                tmp.append(i in tmptop[j] or
                           (nalgs > 7 and algerts[i][j] <= 3. * refalgert[j]))
            isBoldArray.append(tmp)
            algfinaldata.append((algmedfinalfunvals[i], algmedmaxevals[i]))

        # significance test of best given algorithm against all others
        best_alg_idx = numpy.array(algerts).argsort(0)[0, :]  # indexed by target index
        significance_versus_others = significance_all_best_vs_other(
            algentries, targets, best_alg_idx)[0]

        # Create the table
        table = []
        spec = r'@{}c@{}|*{%d}{@{\,}r@{}X@{\,}}|@{}r@{}@{}l@{}' % (
            len(targets))  # in case StrLeft not working: replaced c@{} with l@{ }
        spec = r'@{}c@{}|*{%d}{@{}r@{}X@{}}|@{}r@{}@{}l@{}' % (
            len(targets))  # in case StrLeft not working: replaced c@{} with l@{ }
        extraeol = []

        # Generate header lines
        if with_table_heading:
            header = funInfos[df[1]] if funInfos else 'f%d' % df[1]
            table.append([
                r'\multicolumn{%d}{@{\,}c@{\,}}{{\textbf{%s}}}' %
                (2 * len(targets) + 2, header)
            ])
            extraeol.append('')

        if function_targets_line is True or (function_targets_line and df[1]
                                             in function_targets_line):
            if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
                curline = [r'\#FEs/D']
                for i in targetsOfInterest.labels():
                    curline.append(r'\multicolumn{2}{@{}c@{}}{%s}' % i)

            else:
                curline = [r'$\Delta f_\mathrm{opt}$']
                for t in targets:
                    curline.append(
                        r'\multicolumn{2}{@{\,}X@{\,}}{%s}' %
                        writeFEvals2(t, precision=1, isscientific=True))
#                curline.append(r'\multicolumn{2}{@{\,}X@{}|}{%s}'
#                            % writeFEvals2(targets[-1], precision=1, isscientific=True))
            curline.append(r'\multicolumn{2}{@{}l@{}}{\#succ}')
            table.append(curline)
        extraeol.append(r'\hline')
        #        extraeol.append(r'\hline\arrayrulecolor{tableShade}')

        curline = ([r'ERT$_{\text{best}}$'] if with_table_heading
                   else [r'\textbf{f%d}' % df[1]])
        if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
            # write ftarget:fevals
            for i in xrange(len(refalgert[:-1])):
                temp = "%.1e" % targetsOfInterest((df[1], df[0]))[i]
                if temp[-2] == "0":
                    temp = temp[:-2] + temp[-1]
                curline.append(
                    r'\multicolumn{2}{@{}c@{}}{\textit{%s}:%s \quad}' %
                    (temp, writeFEvalsMaxPrec(refalgert[i], 2)))
            temp = "%.1e" % targetsOfInterest((df[1], df[0]))[-1]
            if temp[-2] == "0":
                temp = temp[:-2] + temp[-1]
            curline.append(r'\multicolumn{2}{@{}c@{}|}{\textit{%s}:%s }' %
                           (temp, writeFEvalsMaxPrec(refalgert[-1], 2)))
        else:
            # write #fevals of the reference alg
            for i in refalgert[:-1]:
                curline.append(r'\multicolumn{2}{@{}c@{}}{%s \quad}' %
                               writeFEvalsMaxPrec(i, 2))
            curline.append(r'\multicolumn{2}{@{}c@{}|}{%s}' %
                           writeFEvalsMaxPrec(refalgert[-1], 2))

        # write the success ratio for the reference alg
        tmp2 = numpy.sum(numpy.isnan(refalgevals) == False)  # count the nb of success
        curline.append('%d' % (tmp2))
        if tmp2 > 0:
            curline.append('/%d' % len(refalgevals))

        table.append(curline[:])
        extraeol.append('')

        #for i, gna in enumerate(zip((1, 2, 3), ('bla', 'blo', 'bli'))):
        #print i, gna, gno
        #set_trace()
        # Format data
        #if df == (5, 17):
        #set_trace()

        header = r'\providecommand{\ntables}{7}'
        for i, alg in enumerate(algnames):
            #algname, entries, irs, line, line2, succ, runs, testres1alg in zip(algnames,
            #data, dispersion, isBoldArray, isItalArray, nbsucc, nbruns, testres):
            commandname = r'\alg%stables' % numtotext(i)
            #            header += r'\providecommand{%s}{{%s}{}}' % (commandname, str_to_latex(strip_pathname(alg)))
            header += r'\providecommand{%s}{\StrLeft{%s}{\ntables}}' % (
                commandname, str_to_latex(strip_pathname2(alg)))
            curline = [
                commandname + r'\hspace*{\fill}'
            ]  # each list element becomes a &-separated table entry?

            for j, tmp in enumerate(zip(algerts[i], algdisp[i],  # j is target index
                                        isBoldArray[i], algtestres[i])):
                ert, dispersion, isBold, testres = tmp
                alignment = r'@{\,}X@{\,}'
                if j == len(algerts[i]) - 1:
                    alignment = r'@{\,}X@{\,}|'

                data = ert / refalgert[j]
                # write star for significance against all other algorithms
                str_significance_subsup = ''
                if (len(best_alg_idx) > 0
                        and len(significance_versus_others) > 0
                        and i == best_alg_idx[j]
                        and nbtests * significance_versus_others[j][1] < 0.05):
                    logp = -numpy.ceil(
                        numpy.log10(
                            nbtests * significance_versus_others[j][1]))
                    str_significance_subsup = r"^{%s%s}" % (
                        significance_vs_others_symbol,
                        str(int(logp)) if logp > 1 else '')

                # moved out of the above else: this was a bug!?
                z, p = testres
                if (nbtests * p) < 0.05 and data < 1. and z < 0.:
                    if not numpy.isinf(refalgert[j]):
                        tmpevals = algevals[i][j].copy()
                        tmpevals[numpy.isnan(tmpevals)] = algentries[
                            i].maxevals[numpy.isnan(tmpevals)]
                        bestevals = refalgentry.detEvals(targets)
                        bestevals, bestalgalg = (bestevals[0][0],
                                                 bestevals[1][0])
                        bestevals[numpy.isnan(
                            bestevals)] = refalgentry.maxevals[bestalgalg][
                                numpy.isnan(bestevals)]
                        tmpevals = numpy.array(sorted(
                            tmpevals))[0:min(len(tmpevals), len(bestevals))]
                        bestevals = numpy.array(sorted(
                            bestevals))[0:min(len(tmpevals), len(bestevals))]

                    #The conditions are now that ERT < ERT_best and
                    # all(sorted(FEvals_best) > sorted(FEvals_current)).
                    if numpy.isinf(refalgert[j]) or all(tmpevals < bestevals):
                        nbstars = -numpy.ceil(numpy.log10(nbtests * p))
                        # tmp2[-1] += r'$^{%s}$' % superscript
                        str_significance_subsup += r'_{%s%s}' % (
                            significance_vs_ref_symbol,
                            str(int(nbstars)) if nbstars > 1 else '')
                if str_significance_subsup:
                    str_significance_subsup = '$%s$' % str_significance_subsup

                # format number in variable data
                if numpy.isnan(data):
                    curline.append(r'\multicolumn{2}{%s}{.}' % alignment)
                else:
                    if numpy.isinf(refalgert[j]):
                        curline.append(
                            r'\multicolumn{2}{%s}{\textbf{%s}\mbox{\tiny (%s)}%s}'
                            % (alignment, writeFEvalsMaxPrec(algerts[i][j], 2),
                               writeFEvalsMaxPrec(dispersion, precdispersion),
                               str_significance_subsup))
                        continue

                    tmp = writeFEvalsMaxPrec(data,
                                             precfloat,
                                             maxfloatrepr=maxfloatrepr)
                    if data >= maxfloatrepr or data < 0.01:  # either inf or scientific notation
                        if numpy.isinf(data) and j == len(algerts[i]) - 1:
                            tmp += r'\,\textit{%s}' % writeFEvalsMaxPrec(
                                algfinaldata[i][1],
                                0,
                                maxfloatrepr=maxfloatrepr)
                        else:
                            tmp = writeFEvalsMaxPrec(data,
                                                     precscien,
                                                     maxfloatrepr=data)
                            if isBold:
                                tmp = r'\textbf{%s}' % tmp

                        if not numpy.isnan(dispersion):
                            tmpdisp = dispersion / refalgert[j]
                            if tmpdisp >= maxfloatrepr or tmpdisp < 0.005:  # TODO: hack
                                tmpdisp = writeFEvalsMaxPrec(
                                    tmpdisp,
                                    precdispersion,
                                    maxfloatrepr=tmpdisp)
                            else:
                                tmpdisp = writeFEvalsMaxPrec(
                                    tmpdisp,
                                    precdispersion,
                                    maxfloatrepr=maxfloatrepr)
                            tmp += r'\mbox{\tiny (%s)}' % tmpdisp
                        curline.append(
                            r'\multicolumn{2}{%s}{%s%s}' %
                            (alignment, tmp, str_significance_subsup))
                    else:
                        tmp2 = tmp.split('.', 1)
                        if len(tmp2) < 2:
                            tmp2.append('')
                        else:
                            tmp2[-1] = '.' + tmp2[-1]
                        if isBold:
                            tmp3 = []
                            for k in tmp2:
                                tmp3.append(r'\textbf{%s}' % k)
                            tmp2 = tmp3
                        if not numpy.isnan(dispersion):
                            tmpdisp = dispersion / refalgert[j]
                            if tmpdisp >= maxfloatrepr or tmpdisp < 0.01:
                                tmpdisp = writeFEvalsMaxPrec(
                                    tmpdisp,
                                    precdispersion,
                                    maxfloatrepr=tmpdisp)
                            else:
                                tmpdisp = writeFEvalsMaxPrec(
                                    tmpdisp,
                                    precdispersion,
                                    maxfloatrepr=maxfloatrepr)
                            tmp2[-1] += (r'\mbox{\tiny (%s)}' % (tmpdisp))
                        tmp2[-1] += str_significance_subsup
                        curline.extend(tmp2)

            curline.append('%d' % algnbsucc[i])
            curline.append('/%d' % algnbruns[i])
            table.append(curline)
            extraeol.append('')

        # Write table
        res = tableXLaTeX(table, spec=spec, extraeol=extraeol)
        filename = os.path.join(outputdir,
                                'pptables_f%03d_%02dD.tex' % (df[1], df[0]))
        with open(filename, 'w') as f:
            f.write(header + '\n')
            f.write(res)
        if verbose:
            print('Wrote table in %s' % filename)
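
The bold table entries are selected per target column: roughly the maxRank
smallest ERT values, plus anything within a factor of three of the reference
when more than seven algorithms are shown. A sketch of the column ranking
that getTopIndicesOfColumns presumably performs; the real implementation may
treat ties differently:

import numpy as np

def top_indices_of_columns(values, max_rank):
    # For each column (target), return the row indices (algorithms) of
    # the max_rank smallest entries; NaNs sort last under argsort.
    a = np.asarray(values, dtype=float)
    order = a.argsort(axis=0)
    return [list(order[:int(max_rank), j]) for j in range(a.shape[1])]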