def coverage(self, pad=True, cursor=True) :
   with mdb.connection().row_cursor() if cursor else mdb.connection().cursor() as db :
     db.execute(self.query % self.__dict__)
     for result in db.fetchall() :
       yield result
     while (pad) : 
       message.warning('missing bucket')
       yield {}
 def coverage(self, pad=True, cursor=True):
     with mdb.connection().row_cursor() if cursor else mdb.connection(
     ).cursor() as db:
         db.execute(self.query % self.__dict__)
         for result in db.fetchall():
             yield result
         while (pad):
             message.warning('missing bucket')
             yield {}
 def __init__(self, log_ids=[], test_ids=[], xml=None, threshold=0, robust=False, previous=None) :
   'log_ids is a list of regression roots'
   self.log_ids = log_ids
   s_log_ids = ','.join(map(str, log_ids))
   self.tests = mdb.connection().row_cursor()
   if log_ids :
     # create table of individual runs, but not root node as this may have already summarised coverage
     self.tests.execute('CREATE TEMPORARY TABLE '+self.invs+' AS SELECT l1.*, goal_id AS master FROM log AS l0 JOIN log AS l1 ON (l0.log_id = l1.root) LEFT OUTER JOIN master ON (l1.log_id = master.log_id) WHERE l1.root IN ('+s_log_ids+');')
     self.tests.execute('SELECT count(*) AS children FROM '+self.invs)
     children = self.tests.fetchone().children
     if children :
       message.information('%(log_ids)s %(has)s %(children)d children', log_ids=s_log_ids, children=children, has='have' if len(log_ids) > 1 else 'has')
   # append individual runs as given by test_ids
   if xml :
     xml_ids = xml.xml.xpath('/optimize/test/log_id/text()')
   else :
     xml_ids=[]
   if test_ids or xml_ids :
     s_test_ids = ','.join(map(str, test_ids+xml_ids))
     create = ('INSERT INTO '+self.invs) if log_ids else ('CREATE TEMPORARY TABLE '+self.invs+' AS')
     self.tests.execute(create+' SELECT log.*, IFNULL(goal_id, goal.log_id) AS master FROM log LEFT OUTER JOIN master ON (log.log_id = master.log_id) LEFT OUTER JOIN goal ON (log.log_id = goal.log_id) WHERE log.log_id IN ('+s_test_ids+') GROUP BY log_id;')
   self.tests.execute('SELECT count(*) AS tests FROM '+self.invs)
   tests = self.tests.fetchone().tests
   if tests < 1 :
     message.fatal('no tests')
   message.information('starting with %(count)d tests in table %(table)s', count=tests, table=self.invs)
   # check congruency
   self.cvg = mdb.connection().row_cursor()
   rows=self.cvg.execute("SELECT md5_self AS md5, 'md5_self' AS type, invs.master, invs.root FROM point JOIN "+self.invs+" AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;")
   md5 = self.cvg.fetchall()
   if not md5 :
     message.fatal('no master')
   elif len(md5) > 1 :
     message.fatal('md5 of multiple masters do not match')
   else :
     message.debug('md5 query returns %(rows)d', rows=rows)
   self.master = mdb.accessor(md5=md5[0])
   self.cvg.execute("SELECT DISTINCT(md5_axes) AS md5, 'md5_axes' AS type, invs.master, invs.root FROM point JOIN "+self.invs+" AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;")
   md5 = self.cvg.fetchall()
   if len(md5) > 1 :
     message.fatal('md5 of multiple axis masters do not match')
   self.master.axes = md5[0]
   # create status table, collating goal & hits
   self.cvg.execute('CREATE TEMPORARY TABLE '+self.covg+' (bucket_id INTEGER NOT NULL PRIMARY KEY, goal INTEGER, hits INTEGER, total_hits INTEGER, rhits INTEGER, max_hits INTEGER, tests INTEGER);')
   try :
     self.threshold = float(threshold)
   except :
     self.threshold = 0.0
     message.warning('cannot convert threshold value given "%(arg)s" to float because %(exception)s, using %(threshold)2.1f', arg=threshold, exception=sys.exc_info()[0], threshold=self.threshold)
   self.robust = robust
   self.previous = previous
 def __iter__(self) :
   self.reset()
   switched = False
   current = self.status()
   testlist = self.testlist()
   while testlist :
     log = testlist.pop(0)
     updates = self.increment(log.log_id)
     status  = self.status()
     yield mdb.accessor(log=log, last=current, updates=updates, status=status, hits=status.metric().hits-current.metric().hits)
     current = status
     # calculate incremental coverage of remaining tests
     with mdb.connection().row_cursor() as db :
       db.execute('DELETE FROM '+self.invs+' WHERE log_id = %s;', (log.log_id,))
       if status.metric().coverage() > self.threshold :
         if not switched :
           switched = True
           message.note('Switching to incremental selection at %(threshold)0.2f', threshold=self.threshold)
         # switch to incremental as coverage closes
         minexpr = '(status.goal+status.max_hits)-status.rhits' if self.robust else 'status.goal-status.hits'
         if self.robust :
           db.execute('SELECT invs.*, IFNULL(SUM(MIN((status.goal+status.max_hits)-status.rhits, hits.hits)), 0) AS hits FROM '+self.invs+' AS invs LEFT OUTER JOIN hits USING (log_id) JOIN '+self.covg+' AS status ON (hits.bucket_id = status.bucket_id AND status.goal > 0 AND status.hits < (status.goal+status.max_hits)) GROUP BY log_id ORDER BY hits DESC;')
         else :
           db.execute('SELECT invs.*, IFNULL(SUM(MIN(status.goal-status.hits, hits.hits)), 0) AS hits FROM '+self.invs+' AS invs LEFT OUTER JOIN hits USING (log_id) JOIN '+self.covg+' AS status ON (hits.bucket_id = status.bucket_id AND status.goal > 0 AND status.hits < status.goal) GROUP BY log_id ORDER BY hits DESC;')
         testlist = db.fetchall()
    def execute(self, subquery):
        with mdb.connection().row_cursor() as db:
            db.execute('''
SELECT
  log.*,
  message.*,
  COUNT(*) AS count
FROM
(
  SELECT
    MIN(message.date) AS start,
    MAX(message.date) AS stop,
    list.*
  FROM 
  %s AS list
  LEFT JOIN
    message
  USING
    (log_id)
  GROUP BY
    log_id
) AS log
NATURAL LEFT JOIN
  message
GROUP BY
  log.log_id,
  level
ORDER BY %s;
''' % (str(subquery), self.order))
            return db.fetchall()
 def __init__(self, goal_id, defaults, cumulative):
     self.all_nodes = dict()
     with mdb.connection().row_cursor() as db:
         db.execute(
             'SELECT * FROM point LEFT OUTER JOIN axis USING (point_id) LEFT OUTER JOIN enum USING (axis_id) WHERE log_id=%(goal_id)s ORDER BY point_id ASC, axis_id ASC, enum_id ASC;'
             % locals())
         points = db.fetchall()
     for parent, children in index.groupby(points,
                                           lambda row: row.point_id):
         _parent = self.all_nodes.get(parent.parent, None)
         if parent.axis_id:
             self.all_nodes[parent.point_id] = coverage.coverpoint(
                 name=parent.point_name,
                 description=parent.desc,
                 id=parent.point_id,
                 parent=_parent,
                 axes=self.get_axes(children),
                 defaults=defaults,
                 cumulative=cumulative)
         else:
             self.all_nodes[parent.point_id] = coverage.hierarchy(
                 parent.point_name,
                 parent.desc,
                 id=parent.point_id,
                 root=parent.root == None,
                 parent=_parent)
 def dump(self) :
   with mdb.connection().row_cursor() as db :
     db.execute('SELECT * FROM ' + self.covg)
     buckets = db.fetchall()
   def values() :
     for bucket in buckets : yield bucket
   return values()
  def execute(self, subquery) :
    with mdb.connection().row_cursor() as db :
      db.execute('''
SELECT
  log.*,
  message.*,
  COUNT(*) AS count
FROM
(
  SELECT
    MIN(message.date) AS start,
    MAX(message.date) AS stop,
    list.*
  FROM 
  %s AS list
  LEFT JOIN
    message
  USING
    (log_id)
  GROUP BY
    log_id
) AS log
NATURAL LEFT JOIN
  message
GROUP BY
  log.log_id,
  level
ORDER BY %s;
''' % (str(subquery), self.order))
      return db.fetchall()
 def result(self, log_id, buckets):
     with mdb.connection().row_cursor() as db:
         message.debug('retrieving %(log_id)s bucket coverage',
                       log_id=log_id)
         db.execute(
             'SELECT hits.log_id, log.test, log.description, SUM(hits.hits) AS hits FROM hits NATURAL JOIN log WHERE log.root = %(log_id)s AND bucket_id IN (%(buckets)s) GROUP BY hits.log_id ORDER BY hits DESC;'
             % locals())
         return db.fetchall()
 def result(self, log_id):
     with mdb.connection().row_cursor() as db:
         message.debug('retrieving %(log_id)s coverage information',
                       log_id=log_id)
         db.execute(
             'SELECT %(log_id)s as log_id, (SELECT log_id FROM goal WHERE log_id = %(log_id)s LIMIT 1) AS goal, (SELECT log_id FROM hits WHERE log_id = %(log_id)s LIMIT 1) AS coverage, (SELECT goal_id FROM master WHERE log_id = %(log_id)s LIMIT 1) AS master, (SELECT goal.log_id FROM goal JOIN log ON (log.root = goal.log_id) WHERE log.log_id = %(log_id)s LIMIT 1) AS root;'
             % locals())
         return db.fetchone()
    def dump(self):
        with mdb.connection().row_cursor() as db:
            db.execute('SELECT * FROM ' + self.covg)
            buckets = db.fetchall()

        def values():
            for bucket in buckets:
                yield bucket

        return values()
 def __init__(self, goal_id, defaults, cumulative) :
   self.all_nodes = dict()
   with mdb.connection().row_cursor() as db :
     db.execute('SELECT * FROM point LEFT OUTER JOIN axis USING (point_id) LEFT OUTER JOIN enum USING (axis_id) WHERE log_id=%(goal_id)s ORDER BY point_id ASC, axis_id ASC, enum_id ASC;' % locals())
     points = db.fetchall()
   for parent, children in index.groupby(points, lambda row : row.point_id) :
     _parent = self.all_nodes.get(parent.parent, None)
     if parent.axis_id :
       self.all_nodes[parent.point_id] = coverage.coverpoint(name=parent.point_name, description=parent.desc, id=parent.point_id, parent=_parent, axes=self.get_axes(children), defaults=defaults, cumulative=cumulative)
     else :
       self.all_nodes[parent.point_id] = coverage.hierarchy(parent.point_name, parent.desc, id=parent.point_id, root=parent.root == None, parent=_parent)
 def __init__(self, log_id):
     self.log_id = log_id
     self.db = mdb.connection().row_cursor()
     self.db.execute(
         'CREATE TEMPORARY TABLE ' + self.temp +
         ' (PRIMARY KEY (log_id)) SELECT log.*, 0 as new FROM log WHERE log.root = %s or log.log_id = %s;',
         (log_id, log_id))
     message.debug(
         'Start new incremental with table %(table)s, log_id %(log_id)s',
         table=self.temp,
         log_id=log_id)
 def status(self) :
   'calculate & return current coverage'
   with mdb.connection().cursor() as db :
     db.execute('SELECT SUM(MIN(goal, hits)) AS hits, SUM(goal) AS goal, SUM(MIN(goal+max_hits, rhits)) AS rhits, SUM(goal+max_hits) AS rgoal FROM '+self.covg+' WHERE goal > 0;')
     hits, goal, rhits, rgoal = db.fetchone()
     covrge=coverage.coverage(hits=hits, goal=goal)
     robust=coverage.coverage(hits=rhits, goal=rgoal)
     def metric() :
       'be clear instead of using lambda'
       return robust if self.robust else covrge
     return mdb.accessor(coverage=covrge, robust=robust, metric=metric)
 def result(self, log_id, offset, size) :
   '''
     log_id : regression id
     offset : first bucket index
     size   : number of buckets
   '''
   with mdb.connection().row_cursor() as db :
     message.debug('calculating %(log_id)s coverage heat map [%(offset)s+:%(size)s]', log_id=log_id, offset=offset, size=size)
     db.execute('SELECT bucket_id, SUM(hits) AS hits, count(hits) AS tests, '+db.split('test')+' AS testname FROM hits JOIN log USING (log_id) WHERE log.root = %s AND hits.bucket_id >= %s AND hits.bucket_id < %s GROUP BY bucket_id, testname ORDER BY bucket_id ASC, hits DESC;', (log_id, offset, offset+size))
     testnames = self.compress()
     data = list(index.groupby(db, lambda row : row.bucket_id, keyfact=lambda s : list(s._grouper(s.tgtkey)), grpfact=testnames))
     return dict(testnames=testnames.tests(), data=data)
    def status(self):
        'calculate & return current coverage'
        with mdb.connection().cursor() as db:
            db.execute(
                'SELECT SUM(MIN(goal, hits)) AS hits, SUM(goal) AS goal, SUM(MIN(goal+max_hits, rhits)) AS rhits, SUM(goal+max_hits) AS rgoal FROM '
                + self.covg + ' WHERE goal > 0;')
            hits, goal, rhits, rgoal = db.fetchone()
            covrge = coverage.coverage(hits=hits, goal=goal)
            robust = coverage.coverage(hits=rhits, goal=rgoal)

            def metric():
                'be clear instead of using lambda'
                return robust if self.robust else covrge

            return mdb.accessor(coverage=covrge, robust=robust, metric=metric)
 def __iter__(self):
     self.reset()
     switched = False
     current = self.status()
     testlist = self.testlist()
     while testlist:
         log = testlist.pop(0)
         updates = self.increment(log.log_id)
         status = self.status()
         yield mdb.accessor(log=log,
                            last=current,
                            updates=updates,
                            status=status,
                            hits=status.metric().hits -
                            current.metric().hits)
         current = status
         # calculate incremental coverage of remaining tests
         with mdb.connection().row_cursor() as db:
             db.execute('DELETE FROM ' + self.invs + ' WHERE log_id = %s;',
                        (log.log_id, ))
             if status.metric().coverage() > self.threshold:
                 if not switched:
                     switched = True
                     message.note(
                         'Switching to incremental selection at %(threshold)0.2f',
                         threshold=self.threshold)
                 # switch to incremental as coverage closes
                 minexpr = '(status.goal+status.max_hits)-status.rhits' if self.robust else 'status.goal-status.hits'
                 if self.robust:
                     db.execute(
                         'SELECT invs.*, IFNULL(SUM(MIN((status.goal+status.max_hits)-status.rhits, hits.hits)), 0) AS hits FROM '
                         + self.invs +
                         ' AS invs LEFT OUTER JOIN hits USING (log_id) JOIN '
                         + self.covg +
                         ' AS status ON (hits.bucket_id = status.bucket_id AND status.goal > 0 AND status.hits < (status.goal+status.max_hits)) GROUP BY log_id ORDER BY hits DESC;'
                     )
                 else:
                     db.execute(
                         'SELECT invs.*, IFNULL(SUM(MIN(status.goal-status.hits, hits.hits)), 0) AS hits FROM '
                         + self.invs +
                         ' AS invs LEFT OUTER JOIN hits USING (log_id) JOIN '
                         + self.covg +
                         ' AS status ON (hits.bucket_id = status.bucket_id AND status.goal > 0 AND status.hits < status.goal) GROUP BY log_id ORDER BY hits DESC;'
                     )
                 testlist = db.fetchall()
 def result(self, log_id, offset, size):
     '''
   log_id : regression id
   offset : first bucket index
   size   : number of buckets
 '''
     with mdb.connection().row_cursor() as db:
         message.debug(
             'calculating %(log_id)s coverage heat map [%(offset)s+:%(size)s]',
             log_id=log_id,
             offset=offset,
             size=size)
         db.execute(
             'SELECT bucket_id, SUM(hits) AS hits, count(hits) AS tests, ' +
             db.split('test') +
             ' AS testname FROM hits JOIN log USING (log_id) WHERE log.root = %s AND hits.bucket_id >= %s AND hits.bucket_id < %s GROUP BY bucket_id, testname ORDER BY bucket_id ASC, hits DESC;',
             (log_id, offset, offset + size))
         testnames = self.compress()
         data = list(
             index.groupby(db,
                           lambda row: row.bucket_id,
                           keyfact=lambda s: list(s._grouper(s.tgtkey)),
                           grpfact=testnames))
         return dict(testnames=testnames.tests(), data=data)
 def __init__(self,
              log_ids=[],
              test_ids=[],
              xml=None,
              threshold=0,
              robust=False,
              previous=None):
     'log_ids is a list of regression roots'
     self.log_ids = log_ids
     s_log_ids = ','.join(map(str, log_ids))
     self.tests = mdb.connection().row_cursor()
     if log_ids:
         # create table of individual runs, but not root node as this may have already summarised coverage
         self.tests.execute(
             'CREATE TEMPORARY TABLE ' + self.invs +
             ' AS SELECT l1.*, goal_id AS master FROM log AS l0 JOIN log AS l1 ON (l0.log_id = l1.root) LEFT OUTER JOIN master ON (l1.log_id = master.log_id) WHERE l1.root IN ('
             + s_log_ids + ');')
         self.tests.execute('SELECT count(*) AS children FROM ' + self.invs)
         children = self.tests.fetchone().children
         if children:
             message.information(
                 '%(log_ids)s %(has)s %(children)d children',
                 log_ids=s_log_ids,
                 children=children,
                 has='have' if len(log_ids) > 1 else 'has')
     # append individual runs as given by test_ids
     if xml:
         xml_ids = xml.xml.xpath('/optimize/test/log_id/text()')
     else:
         xml_ids = []
     if test_ids or xml_ids:
         s_test_ids = ','.join(map(str, test_ids + xml_ids))
         create = ('INSERT INTO ' +
                   self.invs) if log_ids else ('CREATE TEMPORARY TABLE ' +
                                               self.invs + ' AS')
         self.tests.execute(
             create +
             ' SELECT log.*, IFNULL(goal_id, goal.log_id) AS master FROM log LEFT OUTER JOIN master ON (log.log_id = master.log_id) LEFT OUTER JOIN goal ON (log.log_id = goal.log_id) WHERE log.log_id IN ('
             + s_test_ids + ') GROUP BY log_id;')
     self.tests.execute('SELECT count(*) AS tests FROM ' + self.invs)
     tests = self.tests.fetchone().tests
     if tests < 1:
         message.fatal('no tests')
     message.information('starting with %(count)d tests in table %(table)s',
                         count=tests,
                         table=self.invs)
     # check congruency
     self.cvg = mdb.connection().row_cursor()
     rows = self.cvg.execute(
         "SELECT md5_self AS md5, 'md5_self' AS type, invs.master, invs.root FROM point JOIN "
         + self.invs +
         " AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;"
     )
     md5 = self.cvg.fetchall()
     if not md5:
         message.fatal('no master')
     elif len(md5) > 1:
         message.fatal('md5 of multiple masters do not match')
     else:
         message.debug('md5 query returns %(rows)d', rows=rows)
     self.master = mdb.accessor(md5=md5[0])
     self.cvg.execute(
         "SELECT DISTINCT(md5_axes) AS md5, 'md5_axes' AS type, invs.master, invs.root FROM point JOIN "
         + self.invs +
         " AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;"
     )
     md5 = self.cvg.fetchall()
     if len(md5) > 1:
         message.fatal('md5 of multiple axis masters do not match')
     self.master.axes = md5[0]
     # create status table, collating goal & hits
     self.cvg.execute(
         'CREATE TEMPORARY TABLE ' + self.covg +
         ' (bucket_id INTEGER NOT NULL PRIMARY KEY, goal INTEGER, hits INTEGER, total_hits INTEGER, rhits INTEGER, max_hits INTEGER, tests INTEGER);'
     )
     try:
         self.threshold = float(threshold)
     except:
         self.threshold = 0.0
         message.warning(
             'cannot convert threshold value given "%(arg)s" to float because %(exception)s, using %(threshold)2.1f',
             arg=threshold,
             exception=sys.exc_info()[0],
             threshold=self.threshold)
     self.robust = robust
     self.previous = previous
 def __init__(self, log_id) :
   self.log_id = log_id
   self.db = mdb.connection().row_cursor()
   self.db.execute('CREATE TEMPORARY TABLE '+self.temp+' (PRIMARY KEY (log_id)) SELECT log.*, 0 as new FROM log WHERE log.root = %s or log.log_id = %s;', (log_id, log_id))
   message.debug('Start new incremental with table %(table)s, log_id %(log_id)s', table=self.temp, log_id=log_id)
 def result(self, log_id, buckets) :
   with mdb.connection().row_cursor() as db :
     message.debug('retrieving %(log_id)s bucket coverage', log_id=log_id)
     db.execute('SELECT hits.log_id, log.test, log.description, SUM(hits.hits) AS hits FROM hits NATURAL JOIN log WHERE log.root = %(log_id)s AND bucket_id IN (%(buckets)s) GROUP BY hits.log_id ORDER BY hits DESC;' % locals())
     return db.fetchall()
 def result(self, log_id) :
   with mdb.connection().row_cursor() as db :
     message.debug('retrieving %(log_id)s coverage information', log_id=log_id)
     db.execute('SELECT %(log_id)s as log_id, (SELECT log_id FROM goal WHERE log_id = %(log_id)s LIMIT 1) AS goal, (SELECT log_id FROM hits WHERE log_id = %(log_id)s LIMIT 1) AS coverage, (SELECT goal_id FROM master WHERE log_id = %(log_id)s LIMIT 1) AS master, (SELECT goal.log_id FROM goal JOIN log ON (log.root = goal.log_id) WHERE log.log_id = %(log_id)s LIMIT 1) AS root;' % locals())
     return db.fetchone()
 def result(self, log_id):
     with mdb.connection().row_cursor() as db:
         message.debug('retrieving %(log_id)s messages', log_id=log_id)
         db.execute('SELECT * FROM message WHERE log_id = %(log_id)s;' %
                    locals())
         return db.fetchall()
 def result(self, log_id):
   with mdb.connection().row_cursor() as db :
     message.debug('retrieving %(log_id)s messages', log_id=log_id)
     db.execute('SELECT * FROM message WHERE log_id = %(log_id)s;' % locals())
     return db.fetchall()