Example 1
def generateData(dataSet, target):
    """Returns an array of results to be plotted.

    0th column is ert, 1st is the success rate, 2nd the number of
    successes, 3rd the mean of the number of function evaluations, and
    4th the median of number of function evaluations of successful runs
    or numpy.nan.

    """
    res = []

    data = dataSet.detEvals([target])[0]
    succ = (numpy.isnan(data) == False)
    data[numpy.isnan(data)] = dataSet.maxevals[numpy.isnan(data)]
    res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
    res.append(numpy.mean(data))
    if res[2] > 0:
        res.append(toolsstats.prctile(data[succ], 50)[0])
    else:
        res.append(numpy.nan)
    res[3] = numpy.max(dataSet.maxevals)  # overwrites the 3rd column (the mean) with the largest budget
    return res
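The statistics collected above are delegated to the COCO helpers toolsstats.sp and toolsstats.prctile. Purely as a self-contained sketch of what the resulting columns amount to, the snippet below recomputes the same quantities with plain NumPy on made-up data; the toy arrays and the assumption that sp returns (ERT, success rate, number of successes) are illustrative only, not the library's API.

import numpy as np

# Toy data: evaluations needed to reach the target in 5 runs.
# NaN marks runs that never reached it; maxevals is each run's budget.
evals = np.array([120., 95., np.nan, 310., np.nan])
maxevals = np.array([500., 500., 500., 500., 500.])

succ = ~np.isnan(evals)                            # successful runs
data = np.where(succ, evals, maxevals)             # failures count at their full budget

nsucc = int(succ.sum())
ert = data.sum() / nsucc if nsucc else np.nan      # expected running time
success_rate = nsucc / float(len(data))
mean_fe = data.mean()
median_succ = np.median(evals[succ]) if nsucc else np.nan

print(ert, success_rate, nsucc, mean_fe, median_succ)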
Example 2
def generateData(dataSet, targetFuncValue):
    """Returns an array of results to be plotted.

    1st column is ert, 2nd is the number of successes, 3rd the success
    rate, 4th the mean of the number of function evaluations, and
    finally the median on successful runs.

    """
    res = []
    data = []

    it = iter(reversed(dataSet.evals))
    i = it.next()
    prev = np.array([np.nan] * len(i))

    while i[0] <= targetFuncValue:
        prev = i
        try:
            i = it.next()
        except StopIteration:
            break

    data = prev[1:].copy()  # keep only the number of function evaluations.
    succ = (np.isnan(data) == False)
    if succ.any():
        med = toolsstats.prctile(data[succ], 50)[0]
        #Line above was modified at rev 3050 to make sure that we consider only
        #successful trials in the median
    else:
        med = np.nan

    data[np.isnan(data)] = dataSet.maxevals[np.isnan(data)]

    res = []
    res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
    res.append(np.mean(data))  #mean(FE)
    res.append(med)

    return np.array(res)
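In this variant the evaluation counts are read directly from the aligned dataSet.evals array, whose first column holds the recorded (non-increasing) precision values and whose remaining columns hold the evaluation counts of each run. The reversed-iterator loop simply keeps the topmost row whose precision is already at or below the requested target. A self-contained sketch of the same row selection with plain NumPy, on a made-up evals array:

import numpy as np

# Toy aligned evals array: column 0 is the best function value reached so far,
# columns 1.. are the evaluation counts of three runs at that point.
evals = np.array([
    [1e+1,  10.,  12.,   9.],
    [1e-1, 150., 180., 130.],
    [1e-3, 900., np.nan, 700.],   # run 2 never got below 1e-1
])

target = 1e-2
below = evals[:, 0] <= target                 # rows already at or below the target
if below.any():
    # precisions are non-increasing, so the first matching row is what the
    # while-loop above ends up keeping in `prev`
    data = evals[np.argmax(below)][1:].copy()
else:
    data = np.full(evals.shape[1] - 1, np.nan)

print(data)    # -> [900.  nan 700.]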
Example 3
def generateTable(dsList, CrE=0., outputdir='.', info='default', verbose=True):
    """Generates ERT loss ratio tables.

    :param DataSetList dsList: input data set
    :param float CrE: crafting effort (see COCO documentation)
    :param string outputdir: output folder (must exist)
    :param string info: string suffix for output file names
    :param bool verbose: controls verbosity

    """

    #Set variables
    prcOfInterest = [0, 10, 25, 50, 75, 90]
    for d, dsdim in dsList.dictByDim().iteritems():
        res = []
        maxevals = []
        funcs = []
        mFE = []

        for i in dsdim:
            maxevals.append(max(i.ert[np.isinf(i.ert)==False]))
            funcs.append(i.funcId)
            mFE.append(max(i.maxevals))

        maxevals = max(maxevals)
        mFE = max(mFE)
        EVALS = [2.*d]
        EVALS.extend(10.**(np.arange(1, np.log10(1e-9 + maxevals * 1./d))) * d)
        #Set variables: Done
        data = generateData(dsList, EVALS, CrE)

        tmp = "\\textbf{\\textit{f}\\raisebox{-0.35ex}{%d}--\\textit{f}\\raisebox{-0.35ex}{%d} in %d-D}, maxFE/D=%s" \
            % (min(funcs), max(funcs), d, writeFEvals2(int(mFE/d), maxdigits=6))

        res.append(r" & \multicolumn{" + str(len(prcOfInterest)) + "}{|c}{" + tmp + "}")

        header = ["\\#FEs/D"]
        for i in prcOfInterest:
            if i == 0:
                tmp = "best"
            elif i == 50:
                tmp = "\\textbf{med}"
            else:
                tmp = "%d\\%%" % i
            header.append(tmp)

        #set_trace()
        res.append(" & ".join(header))
        for i in range(len(EVALS)):
            tmpdata = list(data[f][i] for f in data)
            #set_trace()
            tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
            # format entries
            #tmp = [writeFEvals(EVALS[i]/d, '.0')]
            if EVALS[i]/d < 200:
                tmp = [writeFEvals2(EVALS[i]/d, 3)]
            else:
                tmp = [writeFEvals2(EVALS[i]/d, 1)]
            for j in tmpdata:
                # tmp.append(writeFEvals(j, '.2'))
                # tmp.append(writeFEvals2(j, 2))
                if j == 0.:
                    tmp.append("~\\,0")
                elif j < 1:
                    tmp.append("~\\,%1.2f" % j)
                elif j < 10:
                    tmp.append("\\hspace*{1ex}%1.1f" % j)
                elif j < 100:
                    tmp.append("%2.0f" % j)
                else:
                    ar = ("%1.1e" % j).split('e')
                    tmp.append(ar[0] + 'e' + str(int(ar[1])))
                # print tmp[-1]
            res.append(" & ".join(tmp))

        # add last line: runlength distribution for which 1e-8 was not reached.
        tmp = [r"$\text{RL}_{\text{US}}$/D"]
        tmpdata = []
        for i in dsList:
            it = reversed(i.evals)
            curline = None
            nextline = it.next()
            while nextline[0] <= f_thresh:  # f_thresh is the module-level final target value (1e-8)
                curline = nextline[1:]
                nextline = it.next()
            if curline is None:
                tmpdata.extend(i.maxevals)
            else:
                tmpdata.extend(i.maxevals[np.isnan(curline)])

        #set_trace()
        if tmpdata: # if it is not empty
            tmpdata = toolsstats.prctile(tmpdata, prcOfInterest)
            for j in tmpdata:
                tmp.append(writeFEvals2(j/d, 1))
            res.append(" & ".join(tmp))

        res = (r"\\"+ "\n").join(res)
        res = r"\begin{tabular}{c|" + len(prcOfInterest) * "l" +"}\n" + res
        #res = r"\begin{tabular}{ccccc}" + "\n" + res
        res = res + "\n" + r"\end{tabular}" + "\n"

        filename = os.path.join(outputdir, 'pploglosstable_%02dD_%s.tex' % (d, info))
        f = open(filename, 'w')
        f.write(res)
        f.close()
        if verbose:
            print( "Wrote ERT loss ratio table in %s." % filename )
Example 4
def plot(dsList, param='dim', targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)):
    """Generate plot of ERT vs param."""

    dictparam = dsList.dictByParam(param)
    params = sorted(dictparam) # sorted because we draw lines

    # generate plot from dsList
    res = []
    # collect data
    rawdata = {}
    for p in params:
        assert len(dictparam[p]) == 1
        rawdata[p] = dictparam[p][0].detEvals(targets)
        # expect dictparam[p] to have only one element

    # plot lines for ERT
    xpltdata = params
    for i, t in enumerate(targets):
        ypltdata = []
        for p in params:
            data = rawdata[p][i]
            unsucc = np.isnan(data)
            assert len(dictparam[p]) == 1
            data[unsucc] = dictparam[p][0].maxevals
            # compute ERT
            ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
            ypltdata.append(ert)
        res.extend(plt.plot(xpltdata, ypltdata, markersize=20,
                   zorder=len(targets) - i, **styles[i]))
        # for the legend
        plt.plot([], [], markersize=10,
                 label=' %+d' % (np.log10(targets[i])),
                 **styles[i])

    # plot median of successful runs for hardest target with a success
    for p in params:
        for i, t in enumerate(reversed(targets)): # targets has to be from hardest to easiest
            data = rawdata[p][i]
            data = data[np.isnan(data) == False]
            if len(data) > 0:
                median = toolsstats.prctile(data, 50.)[0]
                res.extend(plt.plot(p, median, styles[i]['color'], **medmarker))
                break

    # plot average number of function evaluations for the hardest target
    xpltdata = []
    ypltdata = []
    for p in params:
        data = rawdata[p][0] # first target
        xpltdata.append(p)
        if (np.isnan(data) == False).any():  # at least one run reached the target
            tmpdata = data.copy()
            assert len(dictparam[p]) == 1
            tmpdata[np.isnan(data)] = dictparam[p][0].maxevals[np.isnan(data)]
            tmp = np.mean(tmpdata)
        else:
            tmp = np.nan # Check what happens when plotting NaN
        ypltdata.append(tmp)
    res.extend(plt.plot(xpltdata, ypltdata, **avgstyle))

    # display numbers of successes for hardest target where there is still one success
    for p in params:
        for i, t in enumerate(targets): # targets has to be from hardest to easiest
            data = rawdata[p][i]
            unsucc = np.isnan(data)
            assert len(dictparam[p]) == 1
            data[unsucc] = dictparam[p][0].maxevals
            # compute ERT
            ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
            if srate == 1.:
                break
            elif succ > 0:
                res.append(plt.text(p, ert * 1.85, "%d" % succ, axes=plt.gca(),
                                    horizontalalignment="center",
                                    verticalalignment="bottom"))
                break
    return res
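The function above relies on the DataSetList interface and on module-level style dictionaries (styles, medmarker, avgstyle) that are not shown here. As a rough, self-contained sketch of the central idea, ERT plotted against a parameter such as the dimension for several targets, the snippet below uses made-up evaluation data and plain matplotlib defaults; none of it reflects the real data layout.

import numpy as np
import matplotlib.pyplot as plt

def ert(evals, maxevals):
    """Expected running time: unsuccessful runs are charged their full budget."""
    succ = ~np.isnan(evals)
    data = np.where(succ, evals, maxevals)
    return data.sum() / succ.sum() if succ.any() else np.nan

dims = [2, 3, 5, 10, 20]
targets = (1e-1, 1e-3, 1e-8)
rng = np.random.default_rng(0)
for t in targets:
    erts = []
    for d in dims:
        # made-up evaluation counts for 15 runs; NaN = target not reached
        evals = rng.uniform(50, 500, size=15) * d
        evals[rng.random(15) < 0.1] = np.nan
        erts.append(ert(evals, maxevals=1000. * d))
    plt.loglog(dims, erts, marker='o', label='target %.0e' % t)

plt.xlabel('dimension')
plt.ylabel('ERT')
plt.legend()
plt.show()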