Example #1
0
# Validate user configuration before touching any LIKWID state.
if not groups:
    print("You have to add a performance group name to 'groups' list")
    sys.exit(1)
if sleeptime <= 0:
    print("Sleep time must be greater than zero")
    sys.exit(1)

# gids: performance-group handles, filled in later (outside this excerpt).
gids = {}

ret = pylikwid.hpminit()
if not ret:
    print('Failed to initialize access layer for LIKWID')
    sys.exit(1)

ret = pylikwid.inittopology()
if not ret:
    print('Failed to initialize LIKWID topology module')
    sys.exit(1)

# Collect the apicId of every hardware thread so perfmon monitors them all.
topo = pylikwid.getcputopology()
cpus = [topo["threadPool"][t]["apicId"] for t in topo["threadPool"]]

ret = pylikwid.init(cpus)
if ret != 0:
    print('Failed to initialize LIKWID perfmon module')
    sys.exit(1)
# LIKWID_FORCE=1 tells LIKWID to take over counters already in use.
# NOTE(review): set after pylikwid.init(), so it can only affect later
# operations -- confirm this is the intended placement.
os.environ["LIKWID_FORCE"] = "1"
if pylikwid.setverbosity(0) != 0:
Example #2
0
#!/usr/bin/env python

# Report core and uncore frequency information via the LIKWID frequency
# module, then pin CPU 1 to its minimum frequency.

import pylikwid

# Topology must be initialized before any per-core queries.
pylikwid.inittopology()
topodict = pylikwid.getcputopology()
# Available frequencies/governors are queried for core 0 only; presumably
# representative of all cores -- verify on heterogeneous systems.
freqlist = pylikwid.getavailfreqs(0)
govlist = pylikwid.getavailgovs(0)
print("Available frequencies for CPU core 0:\n{}\n".format(freqlist))
# Clock limits for core 0; the print statements below label these as Hz.
minfreq = float(pylikwid.getcpuclockmin(0))
maxfreq = float(pylikwid.getcpuclockmax(0))
print("Available CPU governors for CPU core 0:\n{}\n".format(govlist))

# Per-hardware-thread report: current clock, min/max limits, active governor.
for idx in topodict["threadPool"]:
    cpu = topodict["threadPool"][idx]["apicId"]
    print("CPU {} : {} Hz (min: {}, max: {}, gov: {})".format(
        cpu, pylikwid.getcpuclockcurrent(cpu), pylikwid.getcpuclockmin(cpu),
        pylikwid.getcpuclockmax(cpu), pylikwid.getgovernor(cpu)))
# NOTE(review): minunc/maxunc are unused in this excerpt -- they may be used
# further down (the file is truncated here). The /1E6 suggests the raw
# uncore values are Hz and these are MHz; confirm against pylikwid docs.
minunc = int(pylikwid.getuncoreclockmin(0) / 1E6)
maxunc = int(pylikwid.getuncoreclockmax(0) / 1E6)
print("\nUncore frequencies:")
for socket in range(topodict["numSockets"]):
    print("Socket {} : min: {} MHz, max: {} MHz".format(
        socket, pylikwid.getuncoreclockmin(socket),
        pylikwid.getuncoreclockmax(socket)))

print("\nSet frequency of CPU 1 to minimum {} MHz:".format(
    int(float(minfreq) / 1E6)))
# Setting both the min and max limit to minfreq pins core 1 to that clock.
# NOTE(review): the setters divide by 1E3 while the message above divides by
# 1E6 -- confirm the unit expected by setcpuclockmin/max (kHz vs MHz).
pylikwid.setcpuclockmin(1, int(float(minfreq) / 1E3))
pylikwid.setcpuclockmax(1, int(float(minfreq) / 1E3))
print("CPU {} : {} Hz (min: {}, max: {}, gov: {})".format(
Example #3
0
    def tune(self,
             n_trial,
             measure_option,
             early_stopping=None,
             callbacks=(),
             si_prefix='G',
             likwid_event=None,
             save_features=False,
             random=False):
        """Begin tuning

        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value of autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        si_prefix: str
            One of tvm.autotvm.util.SI_PREFIXES. The SI prefix to use when reporting FLOPS.
        likwid_event: str, optional
            Name of a LIKWID event set. When given, every measured config is
            re-built and re-run under LIKWID hardware counters and the
            per-thread counter values are stored in
            ``self.cost_model.saved_features``.
        save_features: bool, optional
            When truthy (and ``likwid_event`` is None), store each measured
            config's result in ``self.cost_model.saved_features``.
        random: bool, optional
            When truthy, draw config batches with ``self.random_next_batch``
            and update the tuner with ``self.update_random`` instead of the
            model-guided ``next_batch``/``update``.
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        n_parallel = getattr(measure_batch, 'n_parallel', 1)
        # "No early stopping" is modeled as an effectively infinite budget.
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping
        start_time = time.time()

        # Validate si_prefix arg
        format_si_prefix(0, si_prefix)

        old_level = logger.level

        GLOBAL_SCOPE.in_tuning = True
        i = error_ct = 0

        if likwid_event is not None:
            # Unpack data/kernel shapes and padding from the task arguments;
            # they are needed below to build the buffers for counter runs.
            # NOTE(review): assumes a conv2d-style task layout -- confirm.
            N, CI, H, W = self.task.args[0][1]
            CO, _, KH, KW = self.task.args[1][1]
            padding = self.task.args[3]

        ctx = tvm.context(str(self.task.target), 0)

        while i < n_trial:
            if not self.has_next():
                break

            # Draw the next batch of candidate configs, never exceeding the
            # remaining trial budget.
            if random:
                configs = self.random_next_batch(min(n_parallel, n_trial - i))
            else:
                configs = self.next_batch(min(n_parallel, n_trial - i))

            inputs = [
                MeasureInput(self.task.target, self.task, config)
                for config in configs
            ]
            results = measure_batch(inputs)

            # keep best config
            for k, (inp, res) in enumerate(zip(inputs, results)):
                config = inp.config
                if res.error_no == 0:
                    flops = inp.task.flop / np.mean(res.costs)
                    error_ct = 0
                else:
                    flops = 0
                    error_ct += 1

                if flops > self.best_flops:
                    self.best_flops = flops
                    self.best_config = config
                    self.best_measure_pair = (inp, res)
                    self.best_iter = i + k

                logger.debug("No: %d\t%sFLOPS: %.2f/%.2f\tresult: %s\t%s",
                             i + k + 1, si_prefix,
                             format_si_prefix(flops, si_prefix),
                             format_si_prefix(self.best_flops,
                                              si_prefix), res, config)

            i += len(results)
            self.ttl = min(early_stopping + self.best_iter, n_trial) - i

            if random:
                self.update_random(inputs, results)
            else:
                self.update(inputs, results)

            if likwid_event is not None:
                # Re-run every measured config under LIKWID counters on all
                # hardware threads of the machine.
                pylikwid.inittopology()
                cpu_topo = pylikwid.getcputopology()
                cpus = list(range(cpu_topo['activeHWThreads']))
                pylikwid.finalizetopology()

                err = pylikwid.init(cpus)
                group = pylikwid.addeventset(likwid_event)
                err = pylikwid.setup(group)

                for k, (inp, res) in enumerate(zip(inputs, results)):
                    with inp.target:
                        sch, args = self.task.instantiate(inp.config)
                        func = tvm.build(sch,
                                         args,
                                         target_host=inp.task.target_host)
                        evaluator = func.time_evaluator(func.entry_name,
                                                        ctx,
                                                        repeat=3,
                                                        number=4)

                    # Buffer shapes follow the config's tiling factors
                    # (NCHWc-style packed layout).
                    dshape = (N, CI // inp.config['tile_ic'].size[-1], H, W,
                              inp.config['tile_ic'].size[-1])
                    kshape = (CO // inp.config['tile_oc'].size[-1],
                              CI // inp.config['tile_ic'].size[-1], KH, KW,
                              inp.config['tile_ic'].size[-1],
                              inp.config['tile_oc'].size[-1])
                    oshape = (N, CO // inp.config['tile_oc'].size[-1],
                              H + KH - 2 * padding - 1,
                              W + KW - 2 * padding - 1,
                              inp.config['tile_oc'].size[-1])
                    a_tvm = tvm.nd.array(
                        np.random.uniform(size=dshape).astype(np.float32), ctx)
                    w_tvm = tvm.nd.array(
                        np.random.uniform(size=kshape).astype(np.float32), ctx)
                    c_tvm = tvm.nd.array(np.zeros(oshape, dtype=np.float32),
                                         ctx)
                    # NOTE: warming up the function before measuring was
                    # tried and made the LIKWID counter data much worse (a
                    # RandForest model trained on post-tuning data performed
                    # barely better than random), so no warm-up is done here.

                    # LIKWID PERFCTR: run the evaluator with counters armed.
                    # The argument order depends on which buffer matches the
                    # second build argument's shape.
                    err = pylikwid.start()
                    if tuple(args[1].shape) == w_tvm.shape:
                        evaluator(c_tvm, w_tvm, a_tvm)
                    else:
                        evaluator(c_tvm, a_tvm, w_tvm)
                    err = pylikwid.stop()

                    # Collect per-thread counter values, summing duplicate
                    # event names within a thread.
                    likwid_results = []
                    for thread in range(0, len(cpus)):
                        likwid_results.append({})
                        for event_num in range(
                                pylikwid.getnumberofevents(group)):
                            key = pylikwid.getnameofevent(group, event_num)
                            if key in likwid_results[-1].keys():
                                likwid_results[-1][key] += pylikwid.getresult(
                                    group, event_num, thread)
                            else:
                                likwid_results[-1][key] = pylikwid.getresult(
                                    group, event_num, thread)
                    # END LIKWID PERFCTR

                    if inp.config.index in self.cost_model.saved_features.keys(
                    ):
                        self.cost_model.saved_features[
                            inp.config.index].set_result(res)
                        self.cost_model.saved_features[
                            inp.config.index].set_counters(likwid_results)
                    else:
                        self.cost_model.saved_features[
                            inp.config.index] = SavedFeature(
                                result=res, counters=likwid_results)
                pylikwid.finalize()
            elif save_features:
                for k, (inp, res) in enumerate(zip(inputs, results)):
                    if inp.config.index in self.cost_model.saved_features.keys(
                    ):
                        self.cost_model.saved_features[
                            inp.config.index].set_result(res)
                    else:
                        self.cost_model.saved_features[
                            inp.config.index] = SavedFeature(result=res)
            # Record the wall-clock timestamp of this batch next to the most
            # recent score entry (if any).
            if len(self.cost_model.saved_features['scores']) > 0:
                self.cost_model.saved_features['scores'][-1].append(
                    time.time() - start_time)

            for callback in callbacks:
                callback(self, inputs, results)

            if i >= self.best_iter + early_stopping:
                logger.debug("Early stopped. Best iter: %d.", self.best_iter)
                break

            # After a long error streak, switch the logger to DEBUG so the
            # failure causes become visible; restore the old level otherwise.
            if error_ct > 150:
                logging.basicConfig()
                logger.warning(
                    "Too many errors happen in the tuning. Now is in debug mode"
                )
                logger.setLevel(logging.DEBUG)
            else:
                logger.setLevel(old_level)

        GLOBAL_SCOPE.in_tuning = False
        del measure_batch