Code example #1
0
 def _setupUserSpecifiedMetrics(self, metricPatterns):
     """Register every discoverable metric whose symbol matches one of the given patterns.

     Discovers all metric symbols available from the collectd handle, keeps
     those accepted by self._matches, and appends a Metric instance for each
     to self._measurements.
     """
     discovered = metric.Metric.discover(self._collectd)
     for candidate in (entry.symbol for entry in discovered):
         if not self._matches(candidate, metricPatterns):
             continue
         logging.info('adding {0}'.format(candidate))
         self._measurements.append(metric.Metric(candidate, self._collectd))
Code example #2
0
    def __init__(self, input_list):
        """Initialise Solver object. Raise ValueError if solution not possible."""

        if not self.solvable(input_list):
            raise ValueError('A solution is not possible')

        # Deep-copy the grid so this object owns its state independently of
        # the caller's list (https://docs.python.org/2/library/copy.html).
        grid = self.list_to_grid(input_list)
        self.initial_state = copy.deepcopy(grid)
        self.goal_state = self.set_goal_state(input_list)

        # Custom containers are used so a custom __contains__() is available.
        self.frontier = custom_structures.Frontier()
        self.ast_frontier = custom_structures.Priority_Frontier()
        self.explored = custom_structures.Explored()

        # TODO: fringe metrics not working for ast (because we're passing it wrong frontier here)
        self.metrics = metric.Metric(self.frontier)
Code example #3
0
    def getMetrics(self, paver):
        '''Gives a list of performance metrics to evaluate.

        Fills in default values for all unset paver options, derives the
        instance filters (no-fail, gap/primal-gap/dual-gap thresholds,
        minimal time/node counts), and returns a list of metric.Metric
        objects configured with those filters.
        '''

        # Fill in defaults for any option the user did not set.
        if 'timeshift' not in paver.options:
            paver.options['timeshift'] = self._defaulttimeshift
        if 'nodeshift' not in paver.options:
            paver.options['nodeshift'] = self._defaultnodeshift
        if 'mintime' not in paver.options:
            paver.options['mintime'] = self._defaultmintime
        if 'failtime' not in paver.options:
            paver.options['failtime'] = self._defaultfailtime
        if 'failnodes' not in paver.options:
            paver.options['failnodes'] = self._defaultfailnodes
        if 'gaptol' not in paver.options:
            paver.options['gaptol'] = self._defaultgaptol
        if 'evalgap' not in paver.options or paver.options['evalgap'] is None:
            paver.options['evalgap'] = self._defaultevalgap
        if 'timerelimpr' not in paver.options:
            paver.options['timerelimpr'] = self._defaulttimerelimpr
        if 'boundrelimpr' not in paver.options:
            paver.options['boundrelimpr'] = self._defaultboundrelimpr
        if 'filtertime' not in paver.options:
            paver.options['filtertime'] = self._defaultfiltertime

        metrics = []

        # check whether we have dual bounds/gaps
        ignoredualbounds = 'ignoredualbounds' in paver.options and paver.options[
            'ignoredualbounds']

        # NOTE(review): the three-axis .loc and .minor_axis below use the
        # deprecated pandas Panel API (removed in pandas 1.0) -- this module
        # requires an old pandas; verify before upgrading.
        valsbyattr = {}
        for attr in paver.aggrsolvedata.minor_axis:
            valsbyattr[attr] = paver.aggrsolvedata.loc[:, :, attr].stack()
        havegap = not ignoredualbounds and paver.hasSolveAttribute(
            'Gap') and valsbyattr['Gap'].nunique() > 1
        # NOTE(review): this guards on the 'DualBound' attribute but inspects
        # valsbyattr['Gap'] -- possibly a copy-paste slip for
        # valsbyattr['DualBound']; confirm intent before changing.
        havedualbound = not ignoredualbounds and paver.hasSolveAttribute(
            'DualBound'
        ) and len(set(valsbyattr['Gap']) - set([-np.inf, np.inf])) > 0
        havedualgap = not ignoredualbounds and paver.hasSolveAttribute(
            'DualGap') and not np.isinf(valsbyattr['DualGap'].min())

        # NOTE(review): np.bool is a deprecated alias of builtin bool
        # (removed in NumPy >= 1.24); kept here for compatibility with the
        # old pandas/numpy stack this code targets.
        fails = paver.aggrsolvedata.loc[:, :, 'Fail'].astype(np.bool)
        failsany = fails.any(axis=1) | paver.instancedata['Fail']

        # mask for no fails (for each solver) and no fail on instance in general
        filternofail = ~fails
        filternofail[paver.instancedata['Fail']] = False
        filternofail.name = "no fail"

        # get instances without fail for all solver
        filterallnofail = ~failsany
        filterallnofail.name = "no fail by all solver"

        filterallnofailknownopt = None
        if paver.hasInstanceAttribute(
                'KnownPrimalBound') and paver.hasInstanceAttribute(
                    'KnownDualBound'):
            filterallnofailknownopt = filterallnofail & (
                paver.instancedata['KnownPrimalBound']
                == paver.instancedata['KnownDualBound'])
            filterallnofailknownopt.name = "no fail by all solver and known optimal value"

        # get instances solved by all solvers up to a certain gap
        filterallmaxgap = []
        if havegap:
            for g in [paver.options['gaptol']] + paver.options['evalgap']:
                f = (paver.aggrsolvedata.loc[:, :, 'Gap'] <= g).min(axis=1)
                f[failsany] = False
                f.name = "gap <= %.6g%% and no fail for all solvers" % (100 *
                                                                        g)
                filterallmaxgap.append(f)

        # get instances solved up to a certain gap
        filtermaxgap = []
        if havegap:
            for g in [paver.options['gaptol']] + paver.options['evalgap']:
                f = (paver.aggrsolvedata.loc[:, :, 'Gap'] <=
                     g)[filternofail].fillna(False).astype(np.bool)
                f.name = "gap <= %.6g%% and not failed" % (100 * g)
                filtermaxgap.append(f)

        # get instances where all solvers found a solution up to a certain quality
        filterallmaxprimgap = []
        if paver.hasSolveAttribute('PrimalGap'):
            for g in [paver.options['gaptol']] + paver.options['evalgap']:
                f = (paver.aggrsolvedata.loc[:, :, 'PrimalGap'] <= g).min(
                    axis=1)
                f[failsany] = False
                f.name = "within %.6g%% of known optimal value and no fail for all solvers" % (
                    100 * g)
                filterallmaxprimgap.append(f)

        # get instances where a solution up to a certain quality was found
        filtermaxprimgap = []
        if paver.hasSolveAttribute('PrimalGap'):
            for g in [paver.options['gaptol']] + paver.options['evalgap']:
                f = (paver.aggrsolvedata.loc[:, :, 'PrimalGap'] <=
                     g)[filternofail].fillna(False).astype(np.bool)
                f.name = "within %.6g%% of known optimal value and not failed" % (
                    100 * g)
                filtermaxprimgap.append(f)

        # get instances with a certain dual gap
        filtermaxdualgap = []
        if havedualgap:
            for g in [paver.options['gaptol']] + paver.options['evalgap']:
                f = (paver.aggrsolvedata.loc[:, :, 'DualGap'] <=
                     g)[filternofail].fillna(False).astype(np.bool)
                f.name = "dual gap <= %.6g%% and not failed" % (100 * g)
                filtermaxdualgap.append(f)

        # get instances with a certain minimal (max) solving time and no fail
        filterminmaxtime = (
            paver.aggrsolvedata.loc[:, :, 'SolverTime'].max(axis=1) >=
            paver.options['filtertime']
        )[filterallnofail].reindex_like(filterallnofail).fillna(False)
        filterminmaxtime.name = 'time >= ' + str(
            paver.options['filtertime']
        ) + ' by at least one solver and no fail for all solvers'
        if filterminmaxtime.sum() == 0:
            filterminmaxtime = None

        # get instances with a certain minimal (max) solving time and no fail and known optimal value
        # BUGFIX: initialize to None so the 'filterminmaxtimeknownopt is not
        # None' checks further below do not raise NameError when this filter
        # is not constructed (e.g. when filterminmaxtime was set to None
        # above). The parallel filterminmaxnodes* path already does this via
        # its else branch.
        filterminmaxtimeknownopt = None
        if filterallnofailknownopt is not None and filterminmaxtime is not None:
            filterminmaxtimeknownopt = filterminmaxtime & filterallnofailknownopt
            filterminmaxtimeknownopt.name = 'time >= ' + str(
                paver.options['filtertime']
            ) + ' by at least one solver and no fail for all solvers and known optimal value'

        if paver.hasSolveAttribute(
                'NumberOfNodes'
        ) and 'filternodes' in paver.options and paver.options[
                'filternodes'] is not None:
            # get instances with a certain minimal (max) number of nodes and no fail
            filterminmaxnodes = (
                paver.aggrsolvedata.loc[:, :, 'NumberOfNodes'].max(axis=1) >=
                paver.options['filternodes']
            )[filterallnofail].reindex_like(filterallnofail).fillna(False)
            filterminmaxnodes.name = 'nodes >= ' + str(
                paver.options['filternodes']
            ) + ' by at least one solver and no fail for all solvers'

            # get instances with a certain minimal (max) solving time and no fail and known optimal value
            if filterallnofailknownopt is not None:
                filterminmaxnodesknownopt = filterminmaxnodes & filterallnofailknownopt
                filterminmaxnodesknownopt.name = 'nodes >= ' + str(
                    paver.options['filternodes']
                ) + ' by at least one solver and no fail for all solvers and known optimal value'
            else:
                filterminmaxnodesknownopt = None
        else:
            filterminmaxnodes = None
            filterminmaxnodesknownopt = None

        # counts of failed runs / failed instances
        m = metric.Metric('Status', 'Fail')
        filterfails = fails.copy()
        filterfails[paver.instancedata['Fail']] = True
        filterfails.name = 'instance or solve run marked as failed'
        m.filter = [filterfails]
        m.betterisup = True
        m.means = False
        m.quantiles = []
        m.boxplot = False
        metrics.append(m)

        # counts per termination-status class
        # pylint: disable=E1101
        m = metric.Metric('Status', 'TerminationStatus')
        termstat = paver.aggrsolvedata.loc[:, :, 'TerminationStatus']
        filtertermnormal = (termstat == utils.TerminationStatus.Normal)
        filtertermnormal.name = "normal termination"
        filtertermlimit = (termstat > utils.TerminationStatus.Normal) & (
            termstat <= utils.TerminationStatus.OtherLimit)
        filtertermlimit.name = "exceeded some limit"
        filtertermuser = (termstat == utils.TerminationStatus.UserInterrupt)
        filtertermuser.name = "interrupted by user"
        filtertermcapa = (
            termstat == utils.TerminationStatus.CapabilityProblem)
        filtertermcapa.name = "capability problem"
        filtertermerror = (termstat >
                           utils.TerminationStatus.CapabilityProblem)
        filtertermerror.name = "error or other problem"
        m.filter = [
            filtertermnormal, filtertermlimit, filtertermuser, filtertermcapa,
            filtertermerror
        ]
        m.means = False
        m.quantiles = []
        m.boxplot = False
        metrics.append(m)
        # pylint: enable=E1101

        # solver time efficiency metric
        m = metric.Metric('Efficiency', 'SolverTime')
        m.shift = paver.options['timeshift']
        m.clip_lower = paver.options['mintime']
        m.failvalue = paver.options['failtime']
        if m.failvalue is not None:
            m.clip_upper = m.failvalue
        m.reltol = paver.options['timerelimpr']
        m.abstol = paver.options['mintime']
        m.filter = [None, filterallnofail]
        m.ppfilter = [filternofail]
        if filterminmaxtime is not None:
            m.filter.append(filterminmaxtime)
            m.ppfilter.append(filterminmaxtime)
        if filterminmaxnodes is not None:
            m.filter.append(filterminmaxnodes)
            m.ppfilter.append(filterminmaxnodes)
        m.filter += filterallmaxgap + filterallmaxprimgap
        m.ppfilter += filtermaxgap + filtermaxprimgap
        m.ppabsolute = True
        m.ppextended = 'extendedprofiles' in paver.options and paver.options[
            'extendedprofiles']
        metrics.append(m)

        # node count efficiency metric (only if node counts carry information)
        if paver.hasSolveAttribute(
                'NumberOfNodes') and valsbyattr['NumberOfNodes'].max(
                ) > 1 and valsbyattr['NumberOfNodes'].nunique() > 1:
            m = metric.Metric('Efficiency', 'NumberOfNodes')
            m.shift = paver.options['nodeshift']
            m.clip_lower = 1
            m.failvalue = paver.options['failnodes']
            if m.failvalue is not None:
                m.clip_upper = m.failvalue
            m.reltol = 0.1
            m.filter = [filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            if len(filterallmaxgap) > 0:
                # w.r.t. all solved instances
                m.filter.append(filterallmaxgap[0])
                m.ppfilter = [filtermaxgap[0]]
                m.ppextended = 'extendedprofiles' in paver.options and paver.options[
                    'extendedprofiles']
            metrics.append(m)

        # iteration count efficiency metric (only if iteration counts carry information)
        if paver.hasSolveAttribute(
                'NumberOfIterations') and valsbyattr['NumberOfIterations'].max(
                ) > 1 and valsbyattr['NumberOfIterations'].nunique() > 1:
            m = metric.Metric('Efficiency', 'NumberOfIterations')
            #m.shift = paver.options['nodeshift'];
            m.clip_lower = 1
            #m.failvalue = paver.options['failnodes'];
            #if m.failvalue is not None :
            #    m.clip_upper = m.failvalue;
            m.reltol = 0.1
            m.filter = [filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            if len(filterallmaxgap) > 0:
                # w.r.t. all solved instances
                m.filter.append(filterallmaxgap[0])
                m.ppfilter = [filtermaxgap[0]]
                m.ppextended = 'extendedprofiles' in paver.options and paver.options[
                    'extendedprofiles']
            metrics.append(m)

        # user-requested metrics from --eval options
        for usereval in paver.options['eval']:
            ueval = _splitusereval(usereval)
            attrib = ueval['attrib']

            # skip if no interesting data
            if not paver.hasSolveAttribute(
                    attrib) or valsbyattr[attrib].nunique() <= 1:
                continue

            m = metric.Metric('--eval', attrib)

            #omit = evaluators.OmitFailedInstance;
            if 'fail' in ueval:
                m.failvalue = float(ueval['fail'])
                #omit = evaluators.OmitInconsistentInstance;
            if 'min' in ueval:
                m.clip_lower = float(ueval['min'])
            if 'max' in ueval:
                m.clip_upper = float(ueval['max'])
            if 'shift' in ueval:
                m.shift = float(ueval['shift'])
            if 'absimpr' in ueval:
                m.abstol = float(ueval['absimpr'])
            else:
                m.abstol = 0.0
            if 'relimpr' in ueval:
                m.reltol = float(ueval['relimpr'])
            else:
                m.reltol = 0.1
            m.filter = [None, filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            m.filter += filterallmaxgap + filterallmaxprimgap
            m.ppfilter = [filternofail] + filtermaxgap + filtermaxprimgap
            m.ppextended = 'extendedprofiles' in paver.options and paver.options[
                'extendedprofiles']

            metrics.append(m)

        # primal-dual integral efficiency metric
        if paver.hasSolveAttribute(
                'PrimalDualIntegral') and not ignoredualbounds:
            m = metric.Metric('Efficiency', 'PrimalDualIntegral')
            m.clip_lower = paver.options['mintime']
            m.shift = paver.options['timeshift']
            m.failvalue = paver.options['failtime']
            if m.failvalue is not None:
                m.clip_upper = m.failvalue
            m.reltol = paver.options['timerelimpr']
            m.abstol = paver.options['mintime']
            m.filter = [filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            m.ppfilter = [filternofail]
            m.ppextended = 'extendedprofiles' in paver.options and paver.options[
                'extendedprofiles']
            metrics.append(m)

        if havegap:
            # averages and quantiles on gap (more or less useful)
            m = metric.Metric('Solution Quality', 'Gap')
            m.clip_lower = 0
            m.clip_upper = 2.0
            m.filter = [filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            m.ppfilter = [filternofail]
            m.ppabsolute = True
            m.pprelative = False
            metrics.append(m)

            # counts on instance within a certain gap
            m = metric.Metric('Solution Quality', 'Gap')
            m.filter = filtermaxgap
            m.boxplot = False
            m.means = False
            m.quantiles = []
            metrics.append(m)

        # primal integral efficiency metric (needs known optimal values)
        if paver.hasSolveAttribute(
                'PrimalIntegral') and filterallnofailknownopt is not None:
            m = metric.Metric('Efficiency', 'PrimalIntegral')
            m.clip_lower = paver.options['mintime']
            m.shift = paver.options['timeshift']
            m.failvalue = paver.options['failtime']
            if m.failvalue is not None:
                m.clip_upper = m.failvalue
            m.reltol = paver.options['timerelimpr']
            m.abstol = paver.options['mintime']
            m.filter = [filterallnofailknownopt]
            if filterminmaxtimeknownopt is not None:
                m.filter.append(filterminmaxtimeknownopt)
            if filterminmaxnodesknownopt is not None:
                m.filter.append(filterminmaxnodesknownopt)
            m.ppfilter = [filternofail]
            m.ppextended = 'extendedprofiles' in paver.options and paver.options[
                'extendedprofiles']
            metrics.append(m)

        if paver.hasSolveAttribute(
                'PrimalGap') and filterallnofailknownopt is not None:
            # averages and quantiles on primal gap (more or less useful)
            m = metric.Metric('Solution Quality', 'PrimalGap')
            m.clip_lower = 0
            m.clip_upper = 2.0
            m.filter = [filterallnofailknownopt]
            if filterminmaxtimeknownopt is not None:
                m.filter.append(filterminmaxtimeknownopt)
            if filterminmaxnodesknownopt is not None:
                m.filter.append(filterminmaxnodesknownopt)
            m.ppfilter = [filternofail]
            m.ppabsolute = True
            m.pprelative = False
            metrics.append(m)

            # counts on instance within a certain primal gap
            m = metric.Metric('Solution Quality', 'PrimalGap')
            m.filter = filtermaxprimgap
            m.boxplot = False
            m.means = False
            m.quantiles = []
            metrics.append(m)

        # dual integral efficiency metric (needs known optimal values, dual bounds)
        if paver.hasSolveAttribute(
                'DualIntegral'
        ) and not ignoredualbounds and filterallnofailknownopt is not None:
            m = metric.Metric('Efficiency', 'DualIntegral')
            m.clip_lower = paver.options['mintime']
            m.shift = paver.options['timeshift']
            m.failvalue = paver.options['failtime']
            if m.failvalue is not None:
                m.clip_upper = m.failvalue
            m.reltol = paver.options['timerelimpr']
            m.abstol = paver.options['mintime']
            m.filter = [filterallnofailknownopt]
            if filterminmaxtimeknownopt is not None:
                m.filter.append(filterminmaxtimeknownopt)
            if filterminmaxnodesknownopt is not None:
                m.filter.append(filterminmaxnodesknownopt)
            m.ppfilter = [filternofail]
            m.ppextended = 'extendedprofiles' in paver.options and paver.options[
                'extendedprofiles']
            metrics.append(m)

        if havedualgap and filterallnofailknownopt is not None:
            # averages and quantiles on dual gap (more or less useful)
            m = metric.Metric('Solution Quality', 'DualGap')
            m.clip_lower = 0
            m.clip_upper = 2.0
            m.filter = [filterallnofailknownopt]
            if filterminmaxtimeknownopt is not None:
                m.filter.append(filterminmaxtimeknownopt)
            if filterminmaxnodesknownopt is not None:
                m.filter.append(filterminmaxnodesknownopt)
            metrics.append(m)

            # counts on instance within a certain dual gap
            m = metric.Metric('Solution Quality', 'DualGap')
            m.filter = filtermaxdualgap
            m.boxplot = False
            m.means = False
            m.quantiles = []
            metrics.append(m)

        # primal bound solution-quality metric
        if paver.hasSolveAttribute('PrimalBound'):
            m = metric.Metric('Solution Quality', 'PrimalBound')
            m.filter = [filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            m.multbydirection = True
            m.means = False
            m.quantiles = []
            m.boxplot = False
            m.reltol = paver.options['boundrelimpr']
            metrics.append(m)

        # dual bound solution-quality metric
        if havedualbound:
            m = metric.Metric('Solution Quality', 'DualBound')
            m.filter = [filterallnofail]
            if filterminmaxtime is not None:
                m.filter.append(filterminmaxtime)
            if filterminmaxnodes is not None:
                m.filter.append(filterminmaxnodes)
            m.multbydirection = True
            m.betterisup = True
            m.means = False
            m.quantiles = []
            m.boxplot = False
            m.reltol = paver.options['boundrelimpr']
            metrics.append(m)

        return metrics
Code example #4
0
    # Abort (exit code 2) when a required input directory was not supplied.
    if len(path2submission) == 0:
        print "Error : missing submission directory."
        sys.exit(2)
    if len(path2groundtruth) == 0:
        print "Error : missing groundtruth directory."
        sys.exit(2)

    # Fingerprints are optional; load them only when a directory was given.
    options.fingerprints = {}
    if len(path2fingerprint) != 0:
        # load list of available fingerprints as dictionary
        options.fingerprints = getListOfFingerprints(path2fingerprint)

    # Evaluate the submission directory against the ground truth directory;
    # results maps event type -> list of per-file result objects.
    results = evaluateDirectory(path2groundtruth, gtName, path2submission,
                                subName, options)

    # Aggregate all results of each event type into a single Metric object.
    # participant/submission are taken from the first result of each type
    # (assumes they are uniform within an event type -- TODO confirm).
    metrics = {}
    for eventType in results.keys():
        metrics[eventType] = metric.Metric()
        metrics[eventType].participant = results[eventType][0].participant
        metrics[eventType].submission = results[eventType][0].submission
        for r in results[eventType]:
            metrics[eventType].add(r)

    # Print a one-line summary per event type (Python 2 print statements).
    for eventType in results.keys():
        if options.verbosity > 0:
            print "----------------------------------------------"
        print "%s %s | %s | %s" % (metrics[eventType].participant,
                                   metrics[eventType].submission, eventType,
                                   metrics[eventType].description())