Example #1
def appendToCsv(d,path,filename,df,ndf,bdf):
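   # read a bz2-compressed results file, parse each line into a Record,
   # and append the records to the CSV outputs (bdf, then df/ndf)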
   try:
      f = bz2.BZ2File( join(path,filename),'r')
      ms = [Record().fromString( s.rstrip('\n')) for s in f.readlines()]
      fp = FileNameParts(filename)
      appendToBulkdataCsv(ms,fp,bdf)
      appendToBothDataCsv(ms,fp,df,ndf)

   except IOError, err:
      if logger: logger.error(err)
Example #2
def appendToCsv(d, path, filename, df, ndf, bdf):
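    # as above, but a file that cannot be parsed is reported, deleted and skipped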
    try:
        f = bz2.BZ2File(join(path, filename), 'r')
        try:
            ms = [Record().fromString(s.rstrip('\n')) for s in f.readlines()]
        except Exception as ex:
            print join(path, filename), 'corrupted, deleting'
            os.remove(join(path, filename))
            return
        fp = FileNameParts(filename)
        appendToBulkdataCsv(ms, fp, bdf)
        appendToBothDataCsv(ms, fp, df, ndf)

    except IOError, err:
        if logger: logger.error(err)
Example #3
def measure(arg,
            commandline,
            delay,
            maxtime,
            outFile=None,
            errFile=None,
            inFile=None,
            logger=None,
            affinitymask=None):
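    # fork: the child spawns and samples the benchmark program and writes a
    # pickled Record of the measurements back through a pipe; the parent
    # simply reads that Record and returns it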

    r, w = os.pipe()
    forkedPid = os.fork()

    if forkedPid:  # read pickled measurements from the pipe
        os.close(w)
        rPipe = os.fdopen(r)
        r = cPickle.Unpickler(rPipe)
        measurements = r.load()
        rPipe.close()
        os.waitpid(forkedPid, 0)
        return measurements

    else:
        # Sample thread will be destroyed when the forked process _exits
        class Sample(threading.Thread):
            def __init__(self, program):
                threading.Thread.__init__(self)
                self.setDaemon(1)
                self.timedout = False
                self.p = program
                self.maxMem = 0
                self.childpids = None
                self.start()

            def run(self):
                try:
                    remaining = maxtime
                    while remaining > 0:
                        mem = gtop.proc_mem(self.p).resident
                        time.sleep(delay)
                        remaining -= delay
                        # race condition - will child processes have been created yet?
                        self.maxMem = max((mem + self.childmem()) / 1024,
                                          self.maxMem)
                    else:
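                        # while-else: maxtime elapsed without a break, so mark
                        # the run as timed out and kill the program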
                        self.timedout = True
                        os.kill(self.p, signal.SIGKILL)
                except OSError, (e, err):
                    if logger: logger.error('%s %s', e, err)

            def childmem(self):
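                # cache the direct child pids on first use, then total their
                # resident memory (see the race-condition note above)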
                if self.childpids == None:
                    self.childpids = set()
                    for each in gtop.proclist():
                        if gtop.proc_uid(each).ppid == self.p:
                            self.childpids.add(each)
                mem = 0
                for each in self.childpids:
                    mem += gtop.proc_mem(each).resident
                return mem

        try:

            m = Record(arg)

            # only write pickles to the pipe
            os.close(r)
            wPipe = os.fdopen(w, 'w')
            w = cPickle.Pickler(wPipe)

            # gtop cpu is since machine boot, so we need a before measurement
            cpus0 = gtop.cpu().cpus
            start = time.time()

            # spawn the program in a separate process
            p = Popen(commandline,
                      stdout=outFile,
                      stderr=errFile,
                      stdin=inFile)

            # start a thread to sample the program's resident memory use
            t = Sample(program=p.pid)

            # wait for program exit status and resource usage
            rusage = os.wait3(0)

            # gtop cpu is since machine boot, so we need an after measurement
            elapsed = time.time() - start
            cpus1 = gtop.cpu().cpus

            # summarize measurements
            if t.timedout:
                m.setTimedout()
            elif rusage[1] == os.EX_OK:
                m.setOkay()
            else:
                m.setError()

            m.userSysTime = rusage[2][0] + rusage[2][1]
            m.maxMem = t.maxMem

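            # per-CPU load: percentage of non-idle time between the gtop
            # snapshots taken before and after the run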
            load = map(
                lambda t0, t1: int(
                    round(100.0 * (1.0 - float(t1.idle - t0.idle) /
                                   (t1.total - t0.total)))), cpus0, cpus1)

            #load.sort(reverse=1) # maybe more obvious unsorted
            m.cpuLoad = ("% ".join([str(i) for i in load])) + "%"

            m.elapsed = elapsed

        except KeyboardInterrupt:
            os.kill(p.pid, signal.SIGKILL)

        except ZeroDivisionError, e:
            if logger: logger.warn('%s %s', e, 'too fast to measure?')
Example #4
def measureCheckAndLogRepeatLargest(p,t,ms):
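   # measure the program once for each test value, appending its output and
   # diagnostics to the log, and remember the last (largest) value that
   # completed within the cutoff as the candidate for repeat runs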
   repeatTestValue = None

   for a in testvalues:
      cmd = cmdWithArg(t,a)
      ofName = outName(0)

      # a logged Empty record will reveal a bencher failure
      m = Record() 

      try:
         with open(logName(p),'w') as logf:
            with open(ofName,'w') as of:
               # append timestamp to log
               print >>logf, '\n', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())

               # append Make output to log
               if hasMake(p.imp):
                  with open( makeName(), 'r') as mf:
                     logf.write( mf.read() ) 

               with open(errName(),'w+') as ef:
                  # append command line showing redirected test data file to log
                  if testdata.get(testname,None):
                     dfName = dataName(testdata[testname],a)
                     print >>logf, '\nCOMMAND LINE:\n', cmd, '<', split(dfName)[1]
                     with open(dfName,'r') as df:
                        m = measure(a,qsplit(cmd),delay,maxtime,of,ef,df,
                           logger=logger,affinitymask=affinitymask)

                  # append command line showing test value argument to log
                  else:
                     print >>logf, '\nCOMMAND LINE:\n', cmd
                     m = measure(a,qsplit(cmd),delay,maxtime,of,ef,
                        logger=logger,affinitymask=affinitymask)

            # check the program output was as expected
            checkAndLog(m,ofName,logf)

            # add to the measurements
            ms.append(m)

            # if there was a problem finding the program just ignore the program output
            if not m.isMissing():
               with open(ofName,'r+') as of:
                  # append diff or ndiff or cmp output to log
                  if m.hasBadOutput():
                     with open(diffName(), 'r') as df:
                        logf.write( df.read() )

                  # append program output to log
                  if isNotChecked(testname):
                     logf.write( '\nPROGRAM OUTPUT NOT CHECKED:\n' )
                     logf.write( of.read() )
                  elif isCheckCmp(testname):
                     logf.write( '\n(BINARY) PROGRAM OUTPUT NOT SHOWN\n' )
                  elif getsize(ofName) > outputmax:
                     of.truncate(outputmax)
                     logf.write( '\n(TRUNCATED) PROGRAM OUTPUT:\n' )
                     logf.write( of.read() )
                  else:
                     logf.write( '\nPROGRAM OUTPUT:\n' )
                     logf.write( of.read() )

                  # append program stderr to log
                  if getsize(errName()):
                     with open(errName(), 'r') as ef:
                        logf.write( '\n' )
                        logf.write( ef.read() )               

         if m.isOkay():
            if m.hasExceeded(cutoff):
               repeatTestValue = None
            else:
               repeatTestValue = a
         else:
            break


      except IOError, err:
         if logger: logger.error(err)

      finally:
         pass  # body not shown in this excerpt
Example #5
def measure(arg,
            commandline,
            delay,
            maxtime,
            outFile=None,
            errFile=None,
            inFile=None,
            logger=None,
            affinitymask=None):

    r, w = os.pipe()
    forkedPid = os.fork()

    if forkedPid:  # read pickled measurements from the pipe
        os.close(w)
        rPipe = os.fdopen(r)
        r = cPickle.Unpickler(rPipe)
        measurements = r.load()
        rPipe.close()
        os.waitpid(forkedPid, 0)
        return measurements

    else:
        # Sample thread will be destroyed when the forked process _exits
        class Sample(threading.Thread):
            def __init__(self, program):
                threading.Thread.__init__(self)
                self.setDaemon(1)
                self.timedout = False
                self.p = program
                self.maxMem = 0
                self.childpids = None
                self.start()

            def run(self):
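                # watchdog only: sleep until maxtime has elapsed, then mark
                # the run as timed out and kill the program if it still exists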
                try:
                    remaining = maxtime
                    while remaining > 0:
                        time.sleep(delay)
                        remaining -= delay
                    else:
                        self.timedout = True
                        os.kill(self.p, signal.SIGKILL)
                except OSError, (e, err):
                    if logger: logger.error('%s %s', e, err)

        try:

            m = Record(arg)

            # only write pickles to the pipe
            os.close(r)
            wPipe = os.fdopen(w, 'w')
            w = cPickle.Pickler(wPipe)

            start = time.time()

            # spawn the program in a separate process
            p = Popen(commandline,
                      stdout=outFile,
                      stderr=errFile,
                      stdin=inFile)

            # start a thread to sample the program's resident memory use
            t = Sample(program=p.pid)

            # wait for program exit status and resource usage
            rusage = os.wait3(0)

            elapsed = time.time() - start

            # summarize measurements
            if t.timedout:
                m.setTimedout()
            elif rusage[1] == os.EX_OK:
                m.setOkay()
            else:
                m.setError()

            m.userSysTime = rusage[2][0] + rusage[2][1]
            m.maxMem = t.maxMem
            m.cpuLoad = "%"
            m.elapsed = elapsed

        except KeyboardInterrupt:
            os.kill(p.pid, signal.SIGKILL)

        except ZeroDivisionError, e:
            if logger: logger.warn('%s %s', e, 'too fast to measure?')
Example #6
def measure(arg,
            commandline,
            delay,
            maxtime,
            outFile=None,
            errFile=None,
            inFile=None,
            logger=None,
            affinitymask=None):
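    # Windows variant: a Job Object accounts for the whole process tree,
    # WaitForSingleObject enforces the timeout, and QueryPerformanceCounter
    # (with GetTickCount as a fallback) supplies the elapsed time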

    m = Record(arg)

    # For % CPU usage
    cpu0 = taskManagerCpuTimes()

    ### Use a JobObject so we capture data for child processes as well
    hJob = CreateJobObject(None, 'proctree')

    # For elapsed time try QueryPerformanceCounter otherwise use GetTickCount
    freq = LARGE_INTEGER()
    isCounter = windll.kernel32.QueryPerformanceFrequency(byref(freq))
    if isCounter:
        t0 = LARGE_INTEGER()
        t = LARGE_INTEGER()
        windll.kernel32.QueryPerformanceCounter(byref(t0))
    else:  # the number of milliseconds since windows started
        t0 = GetTickCount()

    try:
        # spawn the program in a separate process
        p = Popen(commandline, stdout=outFile, stderr=errFile, stdin=inFile)
        hProcess = int(p._handle)
        AssignProcessToJobObject(hJob, hProcess)

        # wait for program exit status - time out in milliseconds
        waitexit = WaitForSingleObject(hProcess, maxtime * 1000)

        # For elapsed time try QueryPerformanceCounter otherwise use GetTickCount
        if isCounter:
            windll.kernel32.QueryPerformanceCounter(byref(t))
            m.elapsed = (t.value - t0.value) / float(freq.value)
        else:  # the number of milliseconds since windows started
            t = GetTickCount()
            m.elapsed = (t - t0) / 1000.0

        if waitexit != 0:
            # terminate any child processes as well
            TerminateJobObject(hJob, -1)
            m.setTimedout()
        elif p.poll() == 0:
            m.setOkay()

            ### Use a JobObject so we capture data for child processes as well
            times = QueryInformationJobObject(
                hJob, JobObjectBasicAccountingInformation)
            # ten million: the number of 100-nanosecond units in one second
            totalusr = times['TotalUserTime'] / nanosecs100
            totalsys = times['TotalKernelTime'] / nanosecs100
            m.userSysTime = totalusr + totalsys

            ### "VM Size" seems the more appropriate measure
            ###
            # corresponds to Peak Mem Usage in Task Manager
            # mem = GetProcessMemoryInfo(hProcess)
            # m.maxMem = mem['PeakWorkingSetSize'] / 1024

            # corresponds to VM Size in Task Manager
            mem = QueryInformationJobObject(hJob,
                                            JobObjectExtendedLimitInformation)
            m.maxMem = mem['PeakJobMemoryUsed'] / 1024

            m.cpuLoad = taskManagerCpuLoad(cpu0, taskManagerCpuTimes(),
                                           totalusr)

        elif p.poll() == 2:
            m.setMissing()
        else:
            m.setError()

    except (OSError, ValueError), (e, err):
        if logger: logger.error('%s %s', e, err)
        m.setError()
Example #7
def measure(arg,
            commandline,
            delay,
            maxtime,
            outFile=None,
            errFile=None,
            inFile=None,
            logger=None,
            affinitymask=None):

    # Monitor :
    # - max memory consumption
    # - user and system time
    # - voluntary and involuntary context switches
    class ProcessMonitor(threading.Thread):
        def __init__(self, process):
            threading.Thread.__init__(self)

            self.process = process
            self.maxmem = 0
            self.time = dict(user=0, system=0)
            self.ctxswitches = dict(voluntary=0, involuntary=0)

            self.setDaemon(1)
            self.start()

        def run(self):
            try:
                while time.time() < start + maxtime:
                    if not self.process.is_running():
                        break

                    self.maxmem = max(self.maxmem, self.currentmem())

                    self.time['user'] = self.process.get_cpu_times().user
                    self.time['system'] = self.process.get_cpu_times().system

                    self.ctxswitches[
                        'voluntary'] = self.process.get_num_ctx_switches(
                        ).voluntary
                    self.ctxswitches[
                        'involuntary'] = self.process.get_num_ctx_switches(
                        ).involuntary

                    time.sleep(delay)
            except psutil.NoSuchProcess:
                pass
            except (OSError, Exception) as e:
                if logger:
                    logger.error("%s : %s" % (e.__class__.__name__, e.message))

        def currentmem(self):
            return self.process.get_memory_info().rss \
                     + sum(map(lambda p: p.get_memory_info().rss, self.childrenproc()))

        def childrenproc(self):
            if not self.process.is_running():
                return []
            else:
                return self.process.get_children(recursive=True)

    def set_affinity_mask():
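        # used as Popen's preexec_fn: translate the affinity bit mask into a
        # list of CPU indices and pin the benchmark process to them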
        if sys.platform.startswith("linux") or sys.platform.startswith(
                "win32"):
            if affinitymask:
                proc = psutil.Process(os.getpid())
                cpus = []

                for i in range(psutil.NUM_CPUS):
                    if affinitymask & (1 << i) > 0:
                        cpus.append(i)

                proc.set_cpu_affinity(cpus)

    def compute_load_per_cpu(cpus0, cpus1):
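        # per-CPU load: share of non-idle time between two psutil.cpu_times
        # snapshots, as an integer percentage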
        cpus = []

        for cpu0, cpu1 in zip(cpus0, cpus1):
            cpus.append(
                int(
                    round(100.0 * (1.0 - float(cpu1.idle - cpu0.idle) /
                                   (sum(cpu1._asdict().values()) -
                                    sum(cpu0._asdict().values()))))))

        return cpus

    try:
        record = Record(arg)

        # psutil cpu is since machine boot, so we need a before measurement
        cpus0 = psutil.cpu_times(percpu=True)
        start = time.time()

        # spawn the program in a separate process
        process = psutil.Popen(commandline,
                               stdout=outFile,
                               stderr=errFile,
                               stdin=inFile,
                               preexec_fn=set_affinity_mask)

        monitor = ProcessMonitor(process)

        # wait for program exit status and resource usage
        timeout = False

        try:
            exitcode = process.wait(timeout=maxtime)
        except psutil.TimeoutExpired:
            timeout = True
            os.kill(process.pid, signal.SIGKILL)

        elapsed = time.time() - start
        cpus1 = psutil.cpu_times(percpu=True)

        # summarize measurements
        if timeout:
            record.setTimedout()
        elif exitcode == os.EX_OK:
            record.setOkay()
        else:
            record.setError()

        record.maxMem = monitor.maxmem / 1024
        record.ctxSwitches = monitor.ctxswitches
        record.cpuLoad = " ".join(
            [str(i) + "%" for i in compute_load_per_cpu(cpus0, cpus1)])

        record.time = dict(user=monitor.time['user'],
                           system=monitor.time['system'],
                           elapsed=elapsed)

    except KeyboardInterrupt:
        os.kill(process.pid, signal.SIGKILL)
    except ZeroDivisionError, e:
        if logger: logger.warn('%s %s', e, 'too fast to measure?')