Beispiel #1
0
    def _WorkloadSetup(self):
        """Calculate hackbench arguments based on input parameters.

        Normalizes the configured memory size to GB, refuses to run on
        low-memory systems (< 0.75 GB/core), sizes the job count from the
        largest NUMA node and decides between numactl and taskset for
        thread affinity.
        """
        (mem, units) = self.memsize
        # normalize the configured memory size to GB
        if units == 'KB':
            mem = mem / (1024.0 * 1024.0)
        elif units == 'MB':
            mem = mem / 1024.0
        elif units == 'TB':
            mem = mem * 1024
        ratio = float(mem) / float(self.num_cpus)
        if ratio >= 0.75:
            mult = float(self._cfg.setdefault('jobspercore', 2))
        else:
            self._log(Log.INFO, "Low memory system (%f GB/core)! Not running" % ratio)
            mult = 0
            self._donotrun = True
        # NOTE(review): mult is computed but never used below (jobs are sized
        # as biggest * 3) — confirm whether jobspercore was meant to factor in

        sysTop = SysTopology()
        # get the number of nodes
        self.nodes = sysTop.getnodes()

        # expand the cpulist once up front; the original called
        # expand_cpulist() for every cpu of every node (accidental O(n^2))
        allowed = set(expand_cpulist(self.cpulist)) if self.cpulist else None

        # get the cpus for each node
        self.cpus = {}
        biggest = 0
        for n in sysTop.getnodes():
            self.cpus[n] = sysTop.getcpus(int(n))
            # if a cpulist was specified, only allow cpus in that list on the node
            if allowed is not None:
                self.cpus[n] = [c for c in self.cpus[n] if str(c) in allowed]

            # track largest number of cpus used on a node (counts the node's
            # full cpu set, not the cpulist-filtered one, as before)
            node_biggest = len(sysTop.getcpus(int(n)))
            if node_biggest > biggest:
                biggest = node_biggest

        # remove nodes with no cpus available for running
        for node, cpus in list(self.cpus.items()):
            if not cpus:
                self.nodes.remove(node)
                self._log(Log.DEBUG, "node %s has no available cpus, removing" % node)

        # setup jobs based on the number of cores available per node
        self.jobs = biggest * 3

        # figure out if we can use numactl or have to use taskset
        self.__usenumactl = False
        self.__multinodes = False
        if len(self.nodes) > 1:
            self.__multinodes = True
            self._log(Log.INFO, "running with multiple nodes (%d)" % len(self.nodes))
            if os.path.exists('/usr/bin/numactl') and not self.cpulist:
                self.__usenumactl = True
                self._log(Log.INFO, "using numactl for thread affinity")

        self.args = ['hackbench', '-P',
                     '-g', str(self.jobs),
                     '-l', str(self._cfg.setdefault('loops', '1000')),
                     '-s', str(self._cfg.setdefault('datasize', '1000'))
                     ]
        self.__err_sleep = 5.0
Beispiel #2
0
    def _WorkloadSetup(self):
        """Locate the kernel source tarball, unpack it if necessary and
        create one KBuildJob per NUMA node.

        Raises rtevalRuntimeError when no tarball or extracted kernel
        source directory can be found.
        """
        # find our source tarball
        if 'tarball' in self._cfg:
            # read the 'tarball' config key that was just tested (the
            # original read self._cfg.tarfile, which does not match the key)
            tarball = os.path.join(self.srcdir, self._cfg.tarball)
            if not os.path.exists(tarball):
                raise rtevalRuntimeError(self, " tarfile %s does not exist!" % tarball)
            self.source = tarball
        else:
            # no explicit tarball configured: take the first matching one
            tarfiles = glob.glob(os.path.join(self.srcdir, "%s*" % kernel_prefix))
            if tarfiles:
                self.source = tarfiles[0]
            else:
                raise rtevalRuntimeError(self, " no kernel tarballs found in %s" % self.srcdir)

        # check for an already-extracted kernel source directory
        kdir = None
        names = os.listdir(self.builddir)
        for d in names:
            if d.startswith(kernel_prefix):
                kdir = d
                break
        if kdir is None:
            # nothing extracted yet: unpack the tarball and look again
            self._extract_tarball()
            names = os.listdir(self.builddir)
            for d in names:
                self._log(Log.DEBUG, "checking %s" % d)
                if d.startswith(kernel_prefix):
                    kdir = d
                    break
        if kdir is None:
            raise rtevalRuntimeError(self, "Can't find kernel directory!")
        self.mydir = os.path.join(self.builddir, kdir)
        self._log(Log.DEBUG, "mydir = %s" % self.mydir)
        self._log(Log.DEBUG, "systopology: %s" % self.topology)
        self.jobs = len(self.topology)
        self.args = []

        # expand the cpulist once instead of once per cpu (was O(n^2))
        allowed = set(expand_cpulist(self.cpulist)) if self.cpulist else None

        # get the cpus for each node
        self.cpus = {}
        self.nodes = self.topology.getnodes()
        for n in self.nodes:
            self.cpus[n] = [int(c.split('/')[-1][3:]) for c in glob.glob('/sys/devices/system/node/node%s/cpu[0-9]*' % n)]
            self.cpus[n].sort()

            # if a cpulist was specified, only allow cpus in that list on the node
            if allowed is not None:
                self.cpus[n] = [c for c in self.cpus[n] if str(c) in allowed]

        # remove nodes with no cpus available for running; iterate a snapshot
        # so mutating self.nodes is safe regardless of its backing storage
        for node, cpus in list(self.cpus.items()):
            if not cpus:
                self.nodes.remove(node)
                self._log(Log.DEBUG, "node %s has no available cpus, removing" % node)

        # one build job per remaining node, pinned to that node's cpus only
        # when a cpulist restricted them
        for n in self.nodes:
            self._log(Log.DEBUG, "Configuring build job for node %d" % int(n))
            self.buildjobs[n] = KBuildJob(self.topology[n], self.mydir, \
                self.logger, self.cpus[n] if self.cpulist else None)
            self.args.append(str(self.buildjobs[n])+";")
Beispiel #3
0
    def _WorkloadSetup(self):
        """Calculate hackbench arguments based on input parameters.

        Normalizes memory to GB, skips the run on low-memory systems
        (< 0.75 GB/core), discovers NUMA nodes/cpus via sysfs, and sizes
        the job count from the largest node.
        """
        (mem, units) = self.memsize
        # normalize the configured memory size to GB
        if units == 'KB':
            mem = mem / (1024.0 * 1024.0)
        elif units == 'MB':
            mem = mem / 1024.0
        elif units == 'TB':
            mem = mem * 1024
        ratio = float(mem) / float(self.num_cpus)
        if ratio >= 0.75:
            mult = float(self._cfg.setdefault('jobspercore', 2))
        else:
            self._log(Log.INFO, "Low memory system (%f GB/core)! Not running" % ratio)
            mult = 0
            self._donotrun = True

        # figure out how many nodes we have
        self.nodes = [ n.split('/')[-1][4:] for n in glob.glob('/sys/devices/system/node/node*') ]

        # expand the cpulist once up front. Entries are compared via str(c):
        # sibling implementations in this project filter with
        # `str(c) in expand_cpulist(...)`, and the original `c in ...` here
        # compared an int against those entries and could never match,
        # wrongly emptying every node's cpu list.
        allowed = set(expand_cpulist(self.cpulist)) if self.cpulist else None

        # get the cpus for each node
        self.cpus = {}
        biggest = 0
        for n in self.nodes:
            self.cpus[n] = [ int(c.split('/')[-1][3:]) for c in glob.glob('/sys/devices/system/node/node%s/cpu[0-9]*' % n) ]
            self.cpus[n].sort()

            # if a cpulist was specified, only allow cpus in that list on the node
            if allowed is not None:
                self.cpus[n] = [ c for c in self.cpus[n] if str(c) in allowed ]

            # track largest number of cpus used on a node
            if len(self.cpus[n]) > biggest:
                biggest = len(self.cpus[n])

        # setup jobs based on the number of cores available per node
        self.jobs = biggest * 3

        # figure out if we can use numactl or have to use taskset
        self.__usenumactl = False
        self.__multinodes = False
        if len(self.nodes) > 1:
            self.__multinodes = True
            self._log(Log.INFO, "running with multiple nodes (%d)" % len(self.nodes))
            if os.path.exists('/usr/bin/numactl'):
                self.__usenumactl = True
                self._log(Log.INFO, "using numactl for thread affinity")

        self.args = ['hackbench',  '-P',
                     '-g', str(self.jobs),
                     '-l', str(self._cfg.setdefault('loops', '1000')),
                     '-s', str(self._cfg.setdefault('datasize', '1000'))
                     ]
        self.__err_sleep = 5.0
Beispiel #4
0
    def _WorkloadPrepare(self):
        """Open the file descriptors used for the compile's output and work
        out how many cpus the workload may use from the configured cpulist.
        """
        # everything not explicitly logged is discarded via /dev/null
        self.__nullfd = os.open("/dev/null", os.O_RDWR)
        if self._logging:
            self.__outfd = self.open_logfile("kcompile.stdout")
            self.__errfd = self.open_logfile("kcompile.stderr")
        else:
            # no logging requested: send both stdout and stderr to /dev/null
            self.__outfd = self.__errfd = self.__nullfd

        if 'cpulist' in self._cfg and self._cfg.cpulist:
            cpulist = self._cfg.cpulist
            # restrict the reported cpu count to the user-supplied cpulist
            self.num_cpus = len(expand_cpulist(cpulist))
        else:
            cpulist = ""
            # NOTE(review): cpulist is assigned but unused here and num_cpus
            # is left untouched — presumably both are consumed by code beyond
            # this snippet; confirm against the full method/class.
Beispiel #5
0
    def __init__(self, config, logger=None):
        """Initialise the cyclictest measurement module.

        Reads priority/bucket/numanode tunables from *config*, resolves the
        set of cpus to measure (an explicit cpulist or all online cpus) and
        creates one RunData collector per cpu plus one for the whole system.
        """
        rtevalModulePrototype.__init__(self, 'measurement', 'cyclictest',
                                       logger)
        self.__cfg = config

        # Tunables pulled from the configuration (defaults applied in place)
        self.__numanodes = int(self.__cfg.setdefault('numanodes', 0))
        self.__priority = int(self.__cfg.setdefault('priority', 95))
        self.__buckets = int(self.__cfg.setdefault('buckets', 2000))

        self.__cyclicdata = {}
        self.__sparse = bool(self.__cfg.cpulist)
        if self.__sparse:
            # measure only the user-requested subset of cpus
            self.__cpulist = self.__cfg.cpulist
            self.__cpus = expand_cpulist(self.__cpulist)
        else:
            self.__cpus = online_cpus()
        self.__numcores = len(self.__cpus)

        cpu_info = cpuinfo()

        # one RunData collector per measured core
        for cpu in self.__cpus:
            data = RunData(cpu, 'core', self.__priority, logfnc=self._log)
            data.description = cpu_info[cpu]['model name']
            self.__cyclicdata[cpu] = data

        # plus one RunData collector for the overall system
        sysdata = RunData('system', 'system', self.__priority,
                          logfnc=self._log)
        sysdata.description = ("(%d cores) " % self.__numcores) \
            + cpu_info['0']['model name']
        self.__cyclicdata['system'] = sysdata

        if self.__sparse:
            msg = "system using %d cpu cores"
        else:
            msg = "system has %d cpu cores"
        self._log(Log.DEBUG, msg % self.__numcores)

        self.__started = False
        self.__cyclicoutput = None
        self.__breaktraceval = None
Beispiel #6
0
    def _WorkloadPrepare(self):
        """Set up logging and build the stress-ng command line.

        Opens the output descriptors (log files or /dev/null), assembles the
        stress-ng arguments from the configuration, and — when a cpulist was
        given — appends per-node --taskset restrictions.
        """
        self.__nullfp = os.open("/dev/null", os.O_RDWR)
        self.__in = self.__nullfp
        if self._logging:
            self.__out = self.open_logfile("stressng.stdout")
            self.__err = self.open_logfile("stressng.stderr")
        else:
            # no logging: discard both streams
            self.__out = self.__err = self.__nullfp

        # stress-ng is only run if the user specifies an option
        self.args = ['stress-ng']
        self.args.append('--%s' % str(self.cfg.option))
        if self.cfg.arg is not None:
            self.args.append(self.cfg.arg)
        if self.cfg.timeout is not None:
            self.args.append('--timeout %s' % str(self.cfg.timeout))

        systop = SysTopology()
        # get the number of nodes
        nodes = systop.getnodes()

        # expand the cpulist once; the original called expand_cpulist() for
        # every cpu of every node (accidental O(n^2))
        allowed = set(expand_cpulist(self.cpulist)) if self.cpulist else None

        # get the cpus for each node
        cpus = {}
        for n in nodes:
            cpus[n] = systop.getcpus(int(n))
            # if a cpulist was specified, only allow cpus in that list on the node
            if allowed is not None:
                cpus[n] = [c for c in cpus[n] if str(c) in allowed]

        # remove nodes with no cpus available for running
        for node, cpu in list(cpus.items()):
            if not cpu:
                nodes.remove(node)
                self._log(Log.DEBUG,
                          "node %s has no available cpus, removing" % node)
        if self.cpulist:
            # pin stress-ng to the surviving cpus, one --taskset per node
            for node in nodes:
                cpulist = ",".join([str(n) for n in cpus[node]])
                self.args.append('--taskset %s' % cpulist)
Beispiel #7
0
    def _WorkloadSetup(self):
        """Calculate hackbench arguments based on input parameters.

        Converts the configured memory size to GB, disables the run on
        low-memory systems (< 0.75 GB/core), walks the sysfs NUMA topology
        to find per-node cpus, then derives the hackbench job count and the
        affinity mechanism (numactl vs taskset).
        """
        (mem, units) = self.memsize
        # normalize the configured memory size to GB
        if units == 'KB':
            mem = mem / (1024.0 * 1024.0)
        elif units == 'MB':
            mem = mem / 1024.0
        elif units == 'TB':
            mem = mem * 1024
        ratio = float(mem) / float(self.num_cpus)
        if ratio >= 0.75:
            mult = float(self._cfg.setdefault('jobspercore', 2))
        else:
            self._log(Log.INFO,
                      "Low memory system (%f GB/core)! Not running" % ratio)
            mult = 0
            self._donotrun = True

        # figure out how many nodes we have
        self.nodes = [
            n.split('/')[-1][4:]
            for n in glob.glob('/sys/devices/system/node/node*')
        ]

        # expand the cpulist exactly once and filter via str(c): the
        # companion implementations in this project compare
        # `str(c) in expand_cpulist(...)`, so the original int-vs-entry
        # `c in expand_cpulist(...)` test could never match and incorrectly
        # discarded every cpu when a cpulist was supplied
        allowed = set(expand_cpulist(self.cpulist)) if self.cpulist else None

        # get the cpus for each node
        self.cpus = {}
        biggest = 0
        for n in self.nodes:
            self.cpus[n] = [
                int(c.split('/')[-1][3:]) for c in glob.glob(
                    '/sys/devices/system/node/node%s/cpu[0-9]*' % n)
            ]
            self.cpus[n].sort()

            # if a cpulist was specified, only allow cpus in that list on the node
            if allowed is not None:
                self.cpus[n] = [
                    c for c in self.cpus[n] if str(c) in allowed
                ]

            # track largest number of cpus used on a node
            if len(self.cpus[n]) > biggest:
                biggest = len(self.cpus[n])

        # setup jobs based on the number of cores available per node
        self.jobs = biggest * 3

        # figure out if we can use numactl or have to use taskset
        self.__usenumactl = False
        self.__multinodes = False
        if len(self.nodes) > 1:
            self.__multinodes = True
            self._log(Log.INFO,
                      "running with multiple nodes (%d)" % len(self.nodes))
            if os.path.exists('/usr/bin/numactl'):
                self.__usenumactl = True
                self._log(Log.INFO, "using numactl for thread affinity")

        self.args = [
            'hackbench', '-P', '-g',
            str(self.jobs), '-l',
            str(self._cfg.setdefault('loops', '1000')), '-s',
            str(self._cfg.setdefault('datasize', '1000'))
        ]
        self.__err_sleep = 5.0