Example 1
def assert_circle_runtimes( runtimes_dir ):
    ""
    tr = fmtresults.TestResults( runtimes_dir+'/'+fmtresults.runtimes_filename )

    topdir = ''
    assert len( tr.dirList() ) == 1
    assert tr.dirList() == [ topdir+'two' ]
    assert tr.testAttrs(topdir+'two','circle')['xtime'] >= 3
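
A minimal usage sketch (the path below is hypothetical and assumes a runtimes file was previously written there; the 'fmtresults' module is assumed to be importable from the surrounding test harness):

# hypothetical directory containing a fmtresults runtimes file
assert_circle_runtimes( '/path/to/test/source/tree' )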
Example 2
def get_results_platform_compiler_id(filename):
    ""
    tr = fmtresults.TestResults(filename)

    platname, cplrname = tr.platform(), tr.compiler()

    platcplr = platname + '/' + cplrname

    return platcplr
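
A hedged usage sketch; the file name is hypothetical and stands for any single-platform results file written by fmtresults.TestResults.writeResults():

platcplr = get_results_platform_compiler_id( 'results.dat' )
print( platcplr )  # e.g. "Linux/gcc"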
Example 3
def assert_cat_dog_runtimes( runtimes_dir ):
    ""
    tr = fmtresults.TestResults( runtimes_dir+'/'+fmtresults.runtimes_filename )

    topdir = ''
    assert len( tr.dirList() ) == 1
    assert tr.dirList() == [ topdir+'one' ]
    assert tr.testList(topdir+'one') == ['cat','dog']
    assert tr.testAttrs(topdir+'one','cat')['xtime'] >= 1
    assert tr.testAttrs(topdir+'one','dog')['xtime'] >= 2
Example 4
import os

def assert_results_file_does_not_have_tests(filename, *tests, **kwargs):
    "assert that none of the named tests appear in the given results file"
    tr = fmtresults.TestResults(filename)

    topdir = kwargs.pop('topdir', None)
    if topdir is None:
        topdir = os.path.basename(os.getcwd())

    for tst in tests:
        rootrel, testkey = os.path.split(topdir + '/' + tst)
        assert len(tr.testAttrs(rootrel, testkey)) == 0
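
The 'topdir' keyword defaults to the name of the current working directory, so the two calls below are equivalent when run from a directory named 'mytests' (all names hypothetical):

assert_results_file_does_not_have_tests( 'results.dat', 'subdir/atest' )
assert_results_file_does_not_have_tests( 'results.dat', 'subdir/atest',
                                         topdir='mytests' )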
Example 5
def copy_results_file_with_new_platid( filename, newfilename, newplatid=None,
                                       newtestdir=None ):
    ""
    tr = fmtresults.TestResults( filename )
    mach = tr.machine()

    if newtestdir:
        tdir = newtestdir
    else:
        tdir = tr.testdir()

    if newplatid:
        plat,cplr = newplatid.split('/')
    else:
        plat,cplr = tr.platform(), tr.compiler()

    tr.writeResults( newfilename, plat, cplr, mach, tdir )
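
A usage sketch with hypothetical file names and platform id; omitting 'newplatid' and 'newtestdir' rewrites the file with its original values:

copy_results_file_with_new_platid( 'results.dat', 'results.copy.dat',
                                   newplatid='Linux/gcc' )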
Example 6
import os
import sys

# 'fmtresults' and 'print3' are assumed to be provided by the surrounding
# test harness
def results_listing( fname, optD ):
    """
    by default, lists the tests by date
    the -p option means list the platform/compilers referenced by at least one
    test
    """
    fmt,vers,hdr,nskip = fmtresults.read_file_header( fname )
    
    if fmt == 'results':
      src = fmtresults.TestResults()
      src.readResults(fname)
      
      if '-p' in optD:
        p = hdr.get( 'PLATFORM', '' )
        c = hdr.get( 'COMPILER', '' )
        if p or c:
          print3( p+'/'+c )
      
      else:
        tL = []
        for d in src.dirList():
          for tn in src.testList(d):
            aD = src.testAttrs(d,tn)
            if 'xdate' in aD:
              tL.append( ( aD['xdate'], tn, d, aD ) )
        tL.sort( reverse=True )
        for xdate,tn,d,aD in tL:
          print3( fmtresults.make_attr_string(aD), d+'/'+tn )
    
    elif fmt == 'multi':
      src = fmtresults.MultiResults()
      src.readFile(fname)
      
      if '-p' in optD:
        pcD = {}
        for d in src.dirList():
          for tn in src.testList(d):
            for pc in src.platformList(d,tn):
              pcD[pc] = None
        pcL = sorted( pcD.keys() )
        for pc in pcL:
          print3( pc )
      
      else:
        tL = []
        for d in src.dirList():
          for tn in src.testList(d):
            for pc in src.platformList(d,tn):
              aD = src.testAttrs(d,tn,pc)
              if 'xdate' in aD:
                tL.append( ( aD['xdate'], tn, d, pc, aD ) )
        tL.sort( reverse=True )
        for xdate,tn,d,pc,aD in tL:
          print3( fmtresults.make_attr_string(aD), pc, d+'/'+tn )
    
    else:
      sys.stderr.write( "Cannot list due to unknown file format: " + \
                        fname + os.linesep )
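
A usage sketch; 'optD' acts as a parsed command line option dictionary, so an empty dict selects the default date listing while a '-p' key selects the platform/compiler listing (the file name is hypothetical):

results_listing( 'results.dat', {} )           # tests, most recent first
results_listing( 'results.dat', { '-p': '' } ) # platform/compiler ids only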
Example 7
import os

def write_runtimes( optD, fileL ):
    """
    Read test results from the list of files in 'fileL' and write to runtimes
    files in the test source tree.
    
    The list of files in 'fileL' can be either in multi-platform format or
    single platform test results format.
    
    Since each test may have multiple entries in the 'fileL' list, the run
    time of each entry is averaged, and the average is used as the run time for
    the test.
    
    If the test source root directory cannot be determined (by looking for
    an existing runtimes file), then the current working directory is assumed
    to be the root directory, and is marked as such by the new runtimes file.

    If a runtimes file does not exist in the current directory, one will be
    created.
    
    Existing runtimes files in subdirectories of the current directory are
    updated as well as the one in the current directory.
    
    New test entries in existing runtimes files may be added but none are
    removed.  If a test is contained in the 'fileL' list and in an existing
    runtimes file, then the entry is overwritten with the 'fileL' value in the
    runtimes file.
    """
    warnL = []
    
    cwd = os.getcwd()
    rootrel = fmtresults.file_rootrel( cwd )
    if rootrel is None:
        # assume the current directory is the test tree root directory
        rootrel = os.path.basename( cwd )
    
    # for each (test dir, test key) pair, store a list of test attribute dicts
    testD = {}
    
    # read the tests from the source files; only save the tests that are
    # subdirectories of the rootrel (or equal to the rootrel)
    rrdirL = rootrel.split('/')
    rrlen = len(rrdirL)
    for srcf in fileL:
      try:
        fmt,vers,hdr,nskip = fmtresults.read_file_header( srcf )
      except Exception as e:
        warnL.append( "Warning: skipping results file: " + srcf +
                      ", Exception = " + str(e) )
      else:
        if fmt == 'results':
          src = fmtresults.TestResults()
          try:
            src.readResults(srcf)
          except Exception as e:
            warnL.append( "Warning: skipping results file: " + srcf +
                          ", Exception = " + str(e) )
          else:
            for d in src.dirList():
              if d.split('/')[:rrlen] == rrdirL:
                for tn in src.testList(d):
                  aD = src.testAttrs( d, tn )
                  if aD.get('result','') in ['pass','diff']:
                    k = (d,tn)
                    testD.setdefault( k, [] ).append( aD )
        elif fmt == 'multi':
          src = fmtresults.MultiResults()
          try:
            src.readFile(srcf)
          except Exception as e:
            warnL.append( "Warning: skipping results file: " + srcf +
                          ", Exception = " + str(e) )
          else:
            for d in src.dirList():
              if d.split('/')[:rrlen] == rrdirL:
                for tn in src.testList(d):
                  for pc in src.platformList( d, tn ):
                    aD = src.testAttrs( d, tn, pc )
                    if aD.get('result','') in ['pass','diff']:
                      k = (d,tn)
                      testD.setdefault( k, [] ).append( aD )
        else:
          warnL.append( "Warning: skipping results source file due to error: " + \
                       srcf + ", corrupt or unknown format" )
    
    # for each test, average the times found in the source files
    avgD = {}
    for k,aL in testD.items():
      d,tn = k
      tsum = 0
      tnum = 0
      save_aD = None
      for aD in aL:
        t = aD.get( 'xtime', 0 )
        if t > 0:
          tsum += t
          tnum += 1
          # use the attributes of the test with the most recent date
          if 'xdate' in aD:
            if save_aD is None or save_aD['xdate'] < aD['xdate']:
              save_aD = aD
      if save_aD is not None:
        t = int( tsum/tnum )
        save_aD['xtime'] = t
        avgD[k] = save_aD
    
    tr = fmtresults.TestResults()
    rtdirD = {}  # runtimes directory -> root relative path
    
    # read any existing runtimes files at or below the CWD
    def read_src_dir( trs, rtD, msgs, dirname ):
        rtf = os.path.join( dirname, fmtresults.runtimes_filename )
        if os.path.isfile(rtf):
            try:
                fmt,vers,hdr,nskip = fmtresults.read_file_header( rtf )
                rr = hdr.get( 'ROOT_RELATIVE', None )
                trs.mergeRuntimes(rtf)
            except Exception as e:
                msgs.append( "Warning: skipping existing runtimes file due to " +
                             "error: " + rtf + ", Exception = " + str(e) )
            else:
                if rr is None:
                    msgs.append( "Warning: skipping existing runtimes file " +
                                 "because it does not contain the ROOT_RELATIVE " +
                                 "specification: " + rtf )
                else:
                    rtD[dirname] = rr

    for root,dirs,files in os.walk( cwd ):
        read_src_dir( tr, rtdirD, warnL, root )

    if '-w' in optD:
      # the -w option means don't merge
      tr = fmtresults.TestResults()
    
    # merge in the tests with average timings
    for k,aD in avgD.items():
      d,tn = k
      tr.addTestName( d, tn, aD )
    
    # make sure top level is included then write out the runtimes files
    rtdirD[ cwd ] = rootrel
    for rtdir,rrel in rtdirD.items():
      tr.writeRuntimes( rtdir, rrel )
    
    return warnL
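
A usage sketch, run from the root of a test source tree (file names hypothetical):

# merge averaged timings from two results files into runtimes files
warnL = write_runtimes( {}, [ 'results.dat.1', 'results.dat.2' ] )
for msg in warnL:
    print( msg )

# a '-w' key discards previously stored runtimes instead of merging them
warnL = write_runtimes( { '-w': '' }, [ 'results.dat.1' ] )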
Example 8
def get_results_test_time_from_file(filename, testname):
    ""
    tr = fmtresults.TestResults(filename)
    return get_results_test_time(tr, testname)
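
A usage sketch (names hypothetical; get_results_test_time() is a companion helper assumed to be defined in the same test module):

xtime = get_results_test_time_from_file( 'results.dat', 'mytest' )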
Example 9
def assert_empty_testresults_file(filename):
    ""
    tr = fmtresults.TestResults(filename)
    assert len(tr.dirList()) == 0