Example #1

import os

import fmtresults  # the project's results-formatting module (assumed importable as-is)

def results_clean( path, optD ):
    """
    The -p <plat/cplr> option means remove test entries associated with
    that platform/compiler combination.
    """
    if not os.path.exists(path):
      raise Exception( "Path does not exist: " + path )
    
    msgL = []
    
    if os.path.isdir(path):
      # assume the path is a test source tree, so look for a runtimes file
      fname = os.path.join( path, fmtresults.runtimes_filename )
      if os.path.exists(fname):
        path = fname
      else:
        raise Exception( "Specified directory does not contain a " + \
                         "test source tree runtimes file: " + path )
    
    if '-p' not in optD:
      msgL.append( "Warning: nothing to do without the -p option " + \
                   "(currently)" )
    
    fmt,vers,hdr,nskip = fmtresults.read_file_header( path )
    if fmt == 'results':
      if '-p' in optD:
        msgL.append( "Warning: the -p option has no effect on results files" )
    elif fmt == 'multi':
      if '-p' in optD:
        xpc = optD['-p']
        mr = fmtresults.MultiResults()
        src = fmtresults.MultiResults()
        src.readFile(path)
        for d in src.dirList():
          for tn in src.testList(d):
            for pc in src.platformList(d,tn):
              if pc != xpc:
                aD = src.testAttrs( d, tn, pc )
                mr.addTestName( d, tn, pc, aD )
        mr.writeFile( path )
    else:
      raise Exception( "Unknown file format: " + path )
    
    return msgL
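
A minimal usage sketch; the "tests" directory and the Linux/gcc
platform/compiler string are hypothetical values:

# hypothetical call: remove all entries for the Linux/gcc platform/compiler
# combination from the runtimes file inside the "tests" source tree
msgs = results_clean( 'tests', { '-p': 'Linux/gcc' } )
for m in msgs:
    print( m )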
Example #2

import os

import fmtresults

def assert_multi_results_file_has_tests(filename, num_plats, *tests):
    ""
    mr = fmtresults.MultiResults()
    mr.readFile(filename)

    for tst in tests:
        rootrel, testkey = os.path.split(tst)
        assert len(mr.platformList(rootrel, testkey)) == num_plats
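
A usage sketch with hypothetical file and test names; each test is passed as
its root-relative directory joined with the test key, and num_plats is the
exact number of platform/compiler entries expected for each test:

# hypothetical check: both tests should appear under exactly two
# platform/compiler combinations in the "timings" file
assert_multi_results_file_has_tests( 'timings', 2,
                                     'proj/tests/atest', 'proj/tests/btest' )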
Example #3

import os
import sys
import time

import fmtresults

def multiplatform_merge( optD, fileL ):
    """
    Read results file(s) and merge test entries into the multi-platform
    timings file contained in the current working directory.
    
    The files in 'fileL' can be single platform or multi-platform formatted
    files.
    
    Only tests that "pass", "diff" or "timeout" will be merged in.
    """
    dcut = None
    if '-d' in optD:
        dcut = int( time.time() - optD['-d']*24*60*60 )
    wopt = '-w' in optD
    xopt = '-x' in optD

    # process_files() is assumed to be a helper defined elsewhere in this module
    process_files( optD, fileL, None )
    
    mr = fmtresults.MultiResults()
    if os.path.exists( fmtresults.multiruntimes_filename ):
        mr.readFile( fmtresults.multiruntimes_filename )
    
    warnL = []
    newtest = False
    for f in fileL:
        try:
            fmt,vers,hdr,nskip = fmtresults.read_file_header( f )
        except Exception:
            warnL.append( "skipping results file: " + f + \
                          ", Exception = " + str(sys.exc_info()[1]) )
        else:
            
            if fmt == 'results':
                if fmtresults.merge_results_file( mr, f, warnL, dcut, xopt, wopt ):
                    newtest = True
            
            elif fmt == 'multi':
                if fmtresults.merge_multi_file( mr, f, warnL, dcut, xopt, wopt ):
                    newtest = True
            
            else:
                warnL.append( "skipping results source file due to " + \
                              "corrupt or unknown format: " + f )
    
    if newtest:
        mr.writeFile( fmtresults.multiruntimes_filename )
    
    return warnL
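
A usage sketch with hypothetical file names; the -d value is a number of
days, and entries with execution dates older than that cutoff are presumably
skipped by the merge helpers:

# hypothetical merge of two single-platform results files into the
# multi-platform timings file in the current working directory
warnings = multiplatform_merge( { '-d': 30 },
                                [ 'results.2023_01_10.Linux',
                                  'results.2023_01_11.Linux' ] )
for w in warnings:
    print( w )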
Example #4

import os

import fmtresults

def get_multi_results_test_attributes(filename, testname, platid=None):
    ""
    mr = fmtresults.MultiResults()
    mr.readFile(filename)

    rootrel, testkey = os.path.split(testname)

    pL = mr.platformList(rootrel, testkey)

    if len(pL) > 0:
        if platid == None:
            platid = pL[0]
        else:
            assert platid in pL

        aD = mr.testAttrs(rootrel, testkey, platid)

        return aD

    else:
        return {}
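
A usage sketch with a hypothetical timings file and test name; when platid is
omitted, the first platform/compiler listed for the test is used:

aD = get_multi_results_test_attributes( 'timings', 'proj/tests/atest' )
xtime = aD.get( 'xtime', None )   # run time attribute, if present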
Example #5

import os
import sys

import fmtresults

# print3 is assumed to be a print helper defined elsewhere in this module

def results_listing( fname, optD ):
    """
    By default, lists the tests by date, most recent first.  The -p option
    means list the platform/compiler combinations referenced by at least one
    test.
    """
    fmt,vers,hdr,nskip = fmtresults.read_file_header( fname )
    
    if fmt == 'results':
      src = fmtresults.TestResults()
      src.readResults(fname)
      
      if '-p' in optD:
        p = hdr.get( 'PLATFORM', '' )
        c = hdr.get( 'COMPILER', '' )
        if p or c:
          print3( p+'/'+c )
      
      else:
        tL = []
        for d in src.dirList():
          for tn in src.testList(d):
            aD = src.testAttrs(d,tn)
            if 'xdate' in aD:
              tL.append( ( aD['xdate'], tn, d, aD ) )
        tL.sort()
        tL.reverse()
        for xdate,tn,d,aD in tL:
          print3( fmtresults.make_attr_string(aD), d+'/'+tn )
    
    elif fmt == 'multi':
      src = fmtresults.MultiResults()
      src.readFile(fname)
      
      if '-p' in optD:
        pcD = {}
        for d in src.dirList():
          for tn in src.testList(d):
            for pc in src.platformList(d,tn):
              pcD[pc] = None
        pcL = list( pcD.keys() )
        pcL.sort()
        for pc in pcL:
          print3( pc )
      
      else:
        tL = []
        for d in src.dirList():
          for tn in src.testList(d):
            for pc in src.platformList(d,tn):
              aD = src.testAttrs(d,tn,pc)
              if 'xdate' in aD:
                tL.append( ( aD['xdate'], tn, d, pc, aD ) )
        tL.sort()
        tL.reverse()
        for xdate,tn,d,pc,aD in tL:
          print3( fmtresults.make_attr_string(aD), pc, d+'/'+tn )
    
    else:
      sys.stderr.write( "Cannot list due to unknown file format: " + \
                        fname + os.linesep )
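
A usage sketch with a hypothetical file name; an empty options dictionary
lists the tests most recent first, while the -p option lists the
platform/compiler combinations instead:

results_listing( 'timings', {} )            # tests, most recent first
results_listing( 'timings', { '-p': '' } )  # platform/compiler combinations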
Example #6

import os
import sys

import fmtresults

def write_runtimes( optD, fileL ):
    """
    Read test results from the list of files in 'fileL' and write to runtimes
    files in the test source tree.
    
    The list of files in 'fileL' can be either in multi-platform format or
    single platform test results format.
    
    Since each test may have multiple entries across the 'fileL' files, the
    run times of those entries are averaged, and the average is used as the
    run time for the test.
    
    If the test source root directory cannot be determined (by looking for
    an existing runtimes file), then the current working directory is assumed
    to be the root directory, and is marked as such by the new runtimes file.

    If a runtimes file does not exist in the current directory, one will be
    created.
    
    Existing runtimes files in subdirectories of the current directory are
    updated as well as the one in the current directory.
    
    New test entries may be added to existing runtimes files, but none are
    removed.  If a test is contained in both the 'fileL' list and an existing
    runtimes file, then the runtimes file entry is overwritten with the
    'fileL' value.
    """
    warnL = []
    
    cwd = os.getcwd()
    rootrel = fmtresults.file_rootrel( cwd )
    if rootrel == None:
        # assume the current directory is the test tree root directory
        rootrel = os.path.basename( cwd )
    
    # for each (test dir, test key) pair, store a list of tests attr dicts
    testD = {}
    
    # read the tests from the source files; only save the tests that are
    # subdirectories of the rootrel (or equal to the rootrel)
    rrdirL = rootrel.split('/')
    rrlen = len(rrdirL)
    for srcf in fileL:
      try:
        fmt,vers,hdr,nskip = fmtresults.read_file_header( srcf )
      except Exception:
        warnL.append( "Warning: skipping results file: " + srcf + \
                     ", Exception = " + str(sys.exc_info()[1]) )
      else:
        if fmt == 'results':
          src = fmtresults.TestResults()
          try:
            src.readResults(srcf)
          except Exception:
            warnL.append( "Warning: skipping results file: " + srcf + \
                         ", Exception = " + str(sys.exc_info()[1]) )
          else:
            for d in src.dirList():
              if d.split('/')[:rrlen] == rrdirL:
                for tn in src.testList(d):
                  aD = src.testAttrs( d, tn )
                  if aD.get('result','') in ['pass','diff']:
                    k = (d,tn)
                    if k in testD: testD[k].append(aD)
                    else:          testD[k] = [aD]
        elif fmt == 'multi':
          src = fmtresults.MultiResults()
          try:
            src.readFile(srcf)
          except Exception:
            warnL.append( "Warning: skipping results file: " + srcf + \
                         ", Exception = " + str(sys.exc_info()[1]) )
          else:
            for d in src.dirList():
              if d.split('/')[:rrlen] == rrdirL:
                for tn in src.testList(d):
                  for pc in src.platformList( d, tn ):
                    aD = src.testAttrs( d, tn, pc )
                    if aD.get('result','') in ['pass','diff']:
                      k = (d,tn)
                      if k in testD: testD[k].append(aD)
                      else:          testD[k] = [aD]
        else:
          warnL.append( "Warning: skipping results source file due to error: " + \
                       srcf + ", corrupt or unknown format" )
    
    # for each test, average the times found in the source files
    avgD = {}
    for k,aL in testD.items():
      d,tn = k
      tsum = 0
      tnum = 0
      save_aD = None
      for aD in aL:
        t = aD.get( 'xtime', 0 )
        if t > 0:
          tsum += t
          tnum += 1
          # use the attributes of the test with the most recent date
          if 'xdate' in aD:
            if save_aD == None or save_aD['xdate'] < aD['xdate']:
              save_aD = aD
      if save_aD != None:
        t = int( tsum/tnum )
        save_aD['xtime'] = t
        avgD[k] = save_aD
    
    tr = fmtresults.TestResults()
    rtdirD = {}  # runtimes directory -> root relative path
    
    # read any existing runtimes files at or below the CWD
    def read_src_dir( trs, rtD, msgs, dirname ):
        rtf = os.path.join( dirname, fmtresults.runtimes_filename )
        if os.path.isfile(rtf):
            try:
                fmt,vers,hdr,nskip = fmtresults.read_file_header( rtf )
                rr = hdr.get( 'ROOT_RELATIVE', None )
                trs.mergeRuntimes(rtf)
            except Exception:
                msgs.append( "Warning: skipping existing runtimes file due to " + \
                             "error: " + rtf + ", Exception = " + \
                             str(sys.exc_info()[1]) )
            else:
                if rr == None:
                    msgs.append( "Warning: skipping existing runtimes file " + \
                                 "because it does not contain the ROOT_RELATIVE " + \
                                 "specification: " + rtf )
                else:
                    rtD[dirname] = rr

    for root,dirs,files in os.walk( cwd ):
        read_src_dir( tr, rtdirD, warnL, root )

    if '-w' in optD:
      # the -w option means don't merge
      tr = fmtresults.TestResults()
    
    # merge in the tests with average timings
    for k,aD in avgD.items():
      d,tn = k
      tr.addTestName( d, tn, aD )
    
    # make sure top level is included then write out the runtimes files
    rtdirD[ cwd ] = rootrel
    for rtdir,rrel in rtdirD.items():
      tr.writeRuntimes( rtdir, rrel )
    
    return warnL
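
A usage sketch with hypothetical file names, run from the root directory of a
test source tree:

# hypothetical call: average the timings found in two results files and
# update the runtimes files at and below the current working directory
warnings = write_runtimes( {}, [ 'results.2023_01_10.Linux',
                                 'results.2023_01_11.Linux' ] )
for w in warnings:
    print( w )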