Example #1
0
 def __init__(self, cores, sockets, sleep_dt, outfile=None):
     """Set up a LIKWID metering thread for the given cores and sockets.

     Parameters:
         cores: iterable of core ids to meter (any iterable of ints;
             the original required a ``set``).
         sockets: iterable of socket-representative core ids
             (uncore/power counters are read per socket).
         sleep_dt: sampling period in seconds.
         outfile: optional path; when given, it is opened for writing and
             meter samples are presumably logged there by the thread's
             run loop (not visible here -- confirm).
     """
     threading.Thread.__init__(self)
     # Event the owner sets to ask the metering loop to terminate.
     self.stop = threading.Event()
     self._regulator = None
     if outfile:
         logging.info("Logging meters to {}".format(outfile))
         self._outfile = open(outfile, 'w')
     else:
         logging.info("Not logging anything")
         self._outfile = None
     logging.info("Using cores {} and sockets {}".format(cores, sockets))
     self.cores = cores
     self.sockets = sockets
     # Meter on the union of both id collections. set(cores).union(...)
     # accepts any iterable (the former set.union(cores, ...) required
     # `cores` to already be a set).
     self.measured_cores = list(set(cores).union(sockets))
     logging.info("-> Metering on {}".format(self.measured_cores))
     self.sleep_dt = sleep_dt
     logging.info("Measuring every {} seconds".format(self.sleep_dt))
     # Custom eventset modeled after LIKWID's MEM_DP group:
     # https://github.com/RRZE-HPC/likwid/blob/master/groups/broadwellEP/MEM_DP.txt
     # FLOP counters on the programmable core counters PMC0-PMC2.
     events = [
         "FP_ARITH_INST_RETIRED_128B_PACKED_DOUBLE:PMC0",
         "FP_ARITH_INST_RETIRED_SCALAR_DOUBLE:PMC1",
         "FP_ARITH_INST_RETIRED_256B_PACKED_DOUBLE:PMC2",
     ]
     # One read/write CAS-count pair per memory-controller box
     # (MBOX0..MBOX7 on Broadwell-EP).
     for box in range(8):
         events.append("CAS_COUNT_RD:MBOX{}C0".format(box))
         events.append("CAS_COUNT_WR:MBOX{}C1".format(box))
     # Package/DRAM energy and the uncore clock.
     events += [
         "PWR_PKG_ENERGY:PWR0",
         "PWR_DRAM_ENERGY:PWR3",
         "UNCORE_CLOCK:UBOXFIX",
     ]
     estr = ",".join(events)
     pylikwid.init(self.measured_cores)
     logging.info("initialized {} threads".format(
         pylikwid.getnumberofthreads()))
     self._gid = pylikwid.addeventset(estr)
     pylikwid.setup(self._gid)
Example #2
0
# Minimal pylikwid example: program the predefined "CACHE" performance
# group on CPUs 0 and 1 and start counting.
import pylikwid
import sys
import numpy as np

cpus = [0,1]
eventset = "CACHE"

# NOTE(review): `arr` and `thr` are not used below -- presumably
# placeholders for a workload / later code; confirm against the full file.
arr=np.random.uniform(size=1000000)
print(arr.shape)
# Query the CPU topology, then release the topology module again.
pylikwid.inittopology()
cputopo = pylikwid.getcputopology()
print(cputopo['activeHWThreads'])
thr = cputopo['threadPool'][0]
pylikwid.finalizetopology()

# Initialize the perfmon module for the selected CPUs.
err = pylikwid.init(cpus)
if err > 0:
    print("Cannot initialize LIKWID")
    sys.exit(1)
# Register the eventset; a non-negative return value is the group id.
group = pylikwid.addeventset(eventset)
if group >= 0:
    print("Eventset {} added with ID {}".format(eventset, group,))
else:
    print("Failed to add eventset {}".format(eventset))
    sys.exit(1)
# Program the hardware counters for this group.
err = pylikwid.setup(group)
if err < 0:
    print("Setup of group {} failed".format(group))
    sys.exit(1)
# Start counting (the measured workload presumably follows this snippet).
err = pylikwid.start()
Example #3
0
#!/usr/bin/env python

import pylikwid

values = []
cpus = [0, 1]

# Count retired instructions (fixed counter FIXC0) on CPUs 0 and 1
# while appending one million integers to a list.
pylikwid.init(cpus)
gid = pylikwid.addeventset("INSTR_RETIRED_ANY:FIXC0")
pylikwid.setup(gid)

pylikwid.start()
for n in range(1000000):
    values.append(n)
pylikwid.stop()

# Print the per-CPU result of event 0 in the group.
for idx, cpu in enumerate(cpus):
    print("Result CPU %d : %f" % (cpu, pylikwid.getresult(gid, 0, idx)))
pylikwid.finalize()
Example #4
0
# Initialize the HPM (hardware counter access) layer.
ret = pylikwid.hpminit()
if not ret:
    print('Failed to initialize access layer for LIKWID')
    sys.exit(1)

ret = pylikwid.inittopology()
if not ret:
    print('Failed to initialize LIKWID topology module')
    sys.exit(1)

# Collect the apicId of every hardware thread on this machine.
# NOTE(review): `cpus` is presumably created earlier in the file -- confirm.
topo = pylikwid.getcputopology()
for t in topo["threadPool"].keys():
    cpus.append(topo["threadPool"][t]["apicId"])

ret = pylikwid.init(cpus)
if ret != 0:
    print('Failed to initialize LIKWID perfmon module')
    sys.exit(1)
# NOTE(review): LIKWID_FORCE is set *after* pylikwid.init(); it normally
# must be in the environment before perfmon claims the counters -- confirm
# whether this ordering is intentional.
os.environ["LIKWID_FORCE"] = "1"

if pylikwid.setverbosity(0) != 0:
    print('Failed to set verbosity')
    sys.exit(1)

run = True
try:
    while run:
        for grp in groups:
            if not gids.has_key(grp):
                gid = pylikwid.addeventset(grp)
Example #5
0
    def tune(self,
             n_trial,
             measure_option,
             early_stopping=None,
             callbacks=(),
             si_prefix='G',
             likwid_event=None,
             save_features=False,
             random=False):
        """Begin tuning

        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value of autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        si_prefix: str
            One of tvm.autotvm.util.SI_PREFIXES. The SI prefix to use when reporting FLOPS.
        likwid_event: str, optional
            LIKWID eventset string (passed to pylikwid.addeventset). When
            given, every measured config is additionally rebuilt and rerun
            locally under LIKWID hardware counters, and the per-thread
            counter values are attached to the cost model's saved features.
            NOTE(review): this path assumes a conv2d-like task whose args
            encode (N, CI, H, W), (CO, _, KH, KW) and padding, and whose
            configs have 'tile_ic'/'tile_oc' knobs -- confirm.
        save_features: bool
            When True (and likwid_event is None), store each config's
            MeasureResult in self.cost_model.saved_features.
        random: bool
            When True, draw configs via random_next_batch()/update_random()
            instead of the model-guided next_batch()/update().
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        n_parallel = getattr(measure_batch, 'n_parallel', 1)
        # No early stopping requested -> effectively never stop early.
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping
        start_time = time.time()

        # Validate si_prefix arg
        format_si_prefix(0, si_prefix)

        old_level = logger.level

        GLOBAL_SCOPE.in_tuning = True
        i = error_ct = 0

        if likwid_event != None:
            # Get arrays for conv
            # (workload dimensions used later to build likwid input tensors)
            N, CI, H, W = self.task.args[0][1]
            CO, _, KH, KW = self.task.args[1][1]
            padding = self.task.args[3]

        ctx = tvm.context(self.task.target.__str__(), 0)
        #a_tvm = tvm.nd.array(np.random.uniform(size=(N,CI,H,W) ).astype(np.float32), ctx)
        #w_tvm = tvm.nd.array(np.random.uniform(size=(CO,CI,KH,KW) ).astype(np.float32), ctx)
        #c_tvm = tvm.nd.array(np.zeros((N,CO,H+KH-2*padding-1,W+KW-2*padding-1), dtype=np.float32), ctx)

        while i < n_trial:
            if not self.has_next():
                break

            # Draw the next batch of candidate configs (up to n_parallel,
            # never exceeding the remaining trial budget).
            if random:
                configs = self.random_next_batch(min(n_parallel, n_trial - i))
            else:
                configs = self.next_batch(min(n_parallel, n_trial - i))

            inputs = [
                MeasureInput(self.task.target, self.task, config)
                for config in configs
            ]
            results = measure_batch(inputs)

            # keep best config
            for k, (inp, res) in enumerate(zip(inputs, results)):
                config = inp.config
                if res.error_no == 0:
                    flops = inp.task.flop / np.mean(res.costs)
                    error_ct = 0
                else:
                    # Failed measurement counts as zero FLOPS and bumps the
                    # consecutive-error counter.
                    flops = 0
                    error_ct += 1

                if flops > self.best_flops:
                    self.best_flops = flops
                    self.best_config = config
                    self.best_measure_pair = (inp, res)
                    self.best_iter = i + k

                logger.debug("No: %d\t%sFLOPS: %.2f/%.2f\tresult: %s\t%s",
                             i + k + 1, si_prefix,
                             format_si_prefix(flops, si_prefix),
                             format_si_prefix(self.best_flops,
                                              si_prefix), res, config)

            i += len(results)
            # Remaining trials before the early-stopping horizon.
            self.ttl = min(early_stopping + self.best_iter, n_trial) - i

            # Feed the measurements back into the search strategy.
            if random:
                self.update_random(inputs, results)
            else:
                self.update(inputs, results)

            if likwid_event != None:
                # Rerun each measured config locally under LIKWID counters
                # on every active hardware thread of this machine.
                pylikwid.inittopology()
                cpu_topo = pylikwid.getcputopology()
                cpus = list(range(cpu_topo['activeHWThreads']))
                pylikwid.finalizetopology()

                err = pylikwid.init(cpus)
                group = pylikwid.addeventset(likwid_event)
                err = pylikwid.setup(group)

                for k, (inp, res) in enumerate(zip(inputs, results)):
                    with inp.target:
                        # Build the config's schedule into a callable and
                        # wrap it in a timing evaluator (3 repeats x 4 runs).
                        sch, args = self.task.instantiate(inp.config)
                        #with tvm.ir.transform.PassContext():
                        func = tvm.build(sch,
                                         args,
                                         target_host=inp.task.target_host)
                        evaluator = func.time_evaluator(func.entry_name,
                                                        ctx,
                                                        repeat=3,
                                                        number=4)

                    # Blocked (NCHWc-style) data/kernel/output shapes derived
                    # from this config's tile_ic/tile_oc split sizes.
                    dshape = (N, CI // inp.config['tile_ic'].size[-1], H, W,
                              inp.config['tile_ic'].size[-1])
                    kshape = (CO // inp.config['tile_oc'].size[-1],
                              CI // inp.config['tile_ic'].size[-1], KH, KW,
                              inp.config['tile_ic'].size[-1],
                              inp.config['tile_oc'].size[-1])
                    oshape = (N, CO // inp.config['tile_oc'].size[-1],
                              H + KH - 2 * padding - 1,
                              W + KW - 2 * padding - 1,
                              inp.config['tile_oc'].size[-1])
                    a_tvm = tvm.nd.array(
                        np.random.uniform(size=dshape).astype(np.float32), ctx)
                    w_tvm = tvm.nd.array(
                        np.random.uniform(size=kshape).astype(np.float32), ctx)
                    c_tvm = tvm.nd.array(np.zeros(oshape, dtype=np.float32),
                                         ctx)
                    ##Warm up ### I tried this warm up and running the function once,
                    #             likwid results were very bad, resulted in barely better than
                    #             random when training RandForest model on post-tuning data
                    #if tuple(args[1].shape) == w_tvm.shape:
                    #    for _ in range(10):
                    #        func(c_tvm, w_tvm, a_tvm)
                    #else:
                    #    for _ in range(10):
                    #        func(c_tvm, a_tvm, w_tvm)

                    #LIKWID PERFCTR
                    # Argument order depends on which operand matches the
                    # built function's second argument shape.
                    err = pylikwid.start()
                    if tuple(args[1].shape) == w_tvm.shape:
                        evaluator(c_tvm, w_tvm, a_tvm)
                    else:
                        evaluator(c_tvm, a_tvm, w_tvm)
                    err = pylikwid.stop()

                    # One {event name: count} dict per hardware thread;
                    # duplicate event names are accumulated into one entry.
                    likwid_results = []
                    for thread in range(0, len(cpus)):
                        likwid_results.append({})
                        for event_num in range(
                                pylikwid.getnumberofevents(group)):
                            key = pylikwid.getnameofevent(group, event_num)
                            if key in likwid_results[-1].keys():
                                likwid_results[-1][key] += pylikwid.getresult(
                                    group, event_num, thread)
                            else:
                                likwid_results[-1][key] = pylikwid.getresult(
                                    group, event_num, thread)
                    #END LIKWID PERFCTR

                    # Attach result + counters to this config's saved
                    # features (create the entry on first sight).
                    if inp.config.index in self.cost_model.saved_features.keys(
                    ):
                        self.cost_model.saved_features[
                            inp.config.index].set_result(res)
                        self.cost_model.saved_features[
                            inp.config.index].set_counters(likwid_results)
                    else:
                        self.cost_model.saved_features[
                            inp.config.index] = SavedFeature(
                                result=res, counters=likwid_results)
                pylikwid.finalize()
            elif save_features == True:
                # No counters requested: just store the measurement results.
                for k, (inp, res) in enumerate(zip(inputs, results)):
                    if inp.config.index in self.cost_model.saved_features.keys(
                    ):
                        self.cost_model.saved_features[
                            inp.config.index].set_result(res)
                    else:
                        self.cost_model.saved_features[
                            inp.config.index] = SavedFeature(result=res)
            # Timestamp the most recent 'scores' entry, if any exists.
            if len(self.cost_model.saved_features['scores']) > 0:
                self.cost_model.saved_features['scores'][-1].append(
                    time.time() - start_time)

            for callback in callbacks:
                callback(self, inputs, results)

            if i >= self.best_iter + early_stopping:
                logger.debug("Early stopped. Best iter: %d.", self.best_iter)
                break

            # Many consecutive failures: switch logger to debug mode
            # (restored to the previous level once measurements succeed).
            if error_ct > 150:
                logging.basicConfig()
                logger.warning(
                    "Too many errors happen in the tuning. Now is in debug mode"
                )
                logger.setLevel(logging.DEBUG)
            else:
                logger.setLevel(old_level)

        GLOBAL_SCOPE.in_tuning = False
        del measure_batch