def __init__(self, params):
    """Store benchmark parameters and best-effort start a RAPL energy meter.

    Sets `self.rapl_enabled` to True and begins a measurement when pyRAPL
    can be initialised; otherwise flags RAPL as unavailable and continues.
    """
    self.params = params
    try:
        pyRAPL.setup()
        self.rapl_enabled = True
        self.meter_rapl = pyRAPL.Measurement("bar")
        self.meter_rapl.begin()
    # Was a bare `except:`, which also swallows KeyboardInterrupt and
    # SystemExit; `Exception` keeps the best-effort behaviour without that.
    except Exception:
        self.rapl_enabled = False
def before(self):
    """Initialise pyRAPL and begin an energy measurement.

    Records the outcome in `self.successful`; known RAPL availability
    problems are logged as warnings instead of propagating.
    """
    try:
        pyRAPL.setup()
        self.meter = pyRAPL.Measurement('bar')
        self.meter.begin()
        self.successful = True
    except PermissionError:
        # RAPL sysfs files exist but are not readable by this user.
        logging.warning(
            "PermissionError occured while reading RAPL file. Fix with \"sudo chmod -R a+r /sys/class/powercap/intel-rapl\""
        )
        self.successful = False
    except FileNotFoundError:
        # No RAPL interface at all on this platform.
        logging.warning(
            "RAPL file not found. Perhaps you are using a platform that does not support RAPL (for example Windows)"
        )
        self.successful = False
def main():
    """Parse CLI arguments, pin CPU affinity, and run the requested
    power and/or time measurements over the selected frequencies."""
    parser = get_parser()
    args = parser.parse_args()

    # NOTE(review): `'name' in args` on an argparse Namespace is only a
    # presence test if the parser uses argparse.SUPPRESS defaults —
    # otherwise the attribute always exists. Verify against get_parser().
    # Pin the process to explicit cores, or to all cores of given sockets.
    if 'affcores' in args:
        os.sched_setaffinity(0, args.affcores)
    elif 'affsockets' in args:
        os.sched_setaffinity(0, get_cores(args.affsockets))

    # Map user-requested frequencies (MHz) to the closest available ones (kHz).
    if 'freqs' in args:
        freqs = [closest_frequency(freq * 1000) for freq in args.freqs]
    else:
        freqs = AVAILABLE_FREQS

    # Socket must be selected for pyRAPL measurement.
    if args.power:
        if 'sockets' not in args:
            print("ERROR: sockets must be selected to measure power.")
            exit()
        try:
            pyRAPL.setup(
                devices=[pyRAPL.Device.PKG],
                socket_ids=args.sockets
            )
        # Was a bare `except:`; narrowed so Ctrl-C / SystemExit still work.
        except Exception:
            print("ERROR: check if selected sockets exist.")
            exit()
        power_measure(args.work, freqs, args.sockets, args.dim, args.powertime, args.log)

    if args.time:
        # Derive the core list either directly or from the selected sockets.
        cores = []
        if 'cores' in args:
            cores = args.cores
        elif 'sockets' in args:
            cores = get_cores(args.sockets)
        time_measure(args.work, freqs, cores, args.dim, args.rep, args.log)
def reset(self):
    """Reset the episode and return the initial (power-derived) state."""
    # Clear per-episode bookkeeping.
    self._reward, self._done = 0, False
    self._info, self._count = {}, 0

    # Draw a random starting position in the frequency table.
    indices = np.arange(len(self._frequencies))
    self._freqpos = self.RNG.choice(indices)
    start_freq = self._frequencies[self._freqpos]

    # Re-arm pyRAPL on the package domain of the configured socket.
    pyRAPL.setup(devices=[pyRAPL.Device.PKG], socket_ids=[self.SOCKET])

    # Apply the frequency, wait, measure power, and discretise it.
    self._power = self.set_wait_measure(start_freq, 'Reset')
    self._state = self.get_state(self._power)
    return self._state
def reset(self):
    """Reset the episode; measure initial power into the state vector."""
    # Wipe per-episode counters and flags.
    self._reward, self._done = 0, False
    self._info, self._count = {}, 0

    # Random initial frequency from the available table.
    indices = np.arange(len(self._frequencies))
    self._freqpos = self.RNG.choice(indices)
    start_freq = self._frequencies[self._freqpos]

    # Re-initialise pyRAPL for the configured socket's package domain.
    pyRAPL.setup(devices=[pyRAPL.Device.PKG], socket_ids=[self.SOCKET])

    # First state component holds the measured power after the change.
    self._state[0] = self.set_wait_measure(start_freq, 'Reset')
    return self._state
def test_decorator_measureit(fs_one_socket):
    """
    Test to measure the energy consumption of a function using the measure decorator

    - decorate a function with the measure decorator and use a CSVOutput
    - launch the function
    - read the produced csv file

    Test if:
      - the file contains 1 line + 1 header
      - a line contains the DRAM energy consumption
      - a line contains the PKG energy consumption
    """
    pyRAPL.setup()
    csv_output = pyRAPL.outputs.CSVOutput('output.csv')

    @pyRAPL.measureit(output=csv_output, number=NUMBER_OF_ITERATIONS)
    def measurable_function(a):
        # Simulate power consumption of the function by bumping the fake
        # RAPL counters.
        write_new_energy_value(POWER_CONSUMPTION_PKG, pyRAPL.Device.PKG, 0)
        write_new_energy_value(POWER_CONSUMPTION_DRAM, pyRAPL.Device.DRAM, 0)
        return 1 + a

    measurable_function(1)
    csv_output.save()

    # Fixed: the file handle was previously opened and never closed.
    with open('output.csv', 'r') as csv_file:
        # flush header
        csv_file.readline()
        n_lines = 0
        for line in csv_file:
            n_lines += 1
            content = line.split(',')
            print(content)
            assert content[0] == 'measurable_function'
            # Reported energy is the counter delta averaged over iterations.
            assert content[3] == str((POWER_CONSUMPTION_PKG - PKG_0_VALUE) / NUMBER_OF_ITERATIONS)
            assert content[4] == str((POWER_CONSUMPTION_DRAM - DRAM_0_VALUE) / NUMBER_OF_ITERATIONS)
    assert n_lines == 1
def test_context_measure(fs_one_socket):
    """
    Test to measure the energy consumption of a function using the Measurement class

    - launch the measure
    - write a new value to the RAPL power measurement api file
    - launch a function
    - end the measure

    Test if:
      - the energy consumption measured is the delta between the first and
        the last value in the RAPL power measurement file
    """
    pyRAPL.setup()
    sink = dummyOutput()

    # The context manager begins/ends the measurement around the call.
    with pyRAPL.Measurement('toto', output=sink):
        measurable_function(1)

    expected_pkg = [(POWER_CONSUMPTION_PKG - PKG_0_VALUE)]
    expected_dram = [(POWER_CONSUMPTION_DRAM - DRAM_0_VALUE)]
    assert sink.data.pkg == expected_pkg
    assert sink.data.dram == expected_dram
def test_nomal_measure_bench(fs_one_socket):
    """
    Test to measure the energy consumption of a function using the Measurement class

    - launch the measure
    - write a new value to the RAPL power measurement api file
    - launch a function
    - end the measure

    Test if:
      - the energy consumption measured is the delta between the first and
        the last value in the RAPL power measurement file
    """
    pyRAPL.setup()

    meter = pyRAPL.Measurement('toto')
    meter.begin()
    measurable_function(1)
    meter.end()

    expected_pkg = [(POWER_CONSUMPTION_PKG - PKG_0_VALUE)]
    expected_dram = [(POWER_CONSUMPTION_DRAM - DRAM_0_VALUE)]
    assert meter.result.pkg == expected_pkg
    assert meter.result.dram == expected_dram
def main(args):
    """Estimate pi by Monte Carlo dart throwing, optionally reporting energy.

    args: sys.argv-style list; args[1] (if present) is the iteration count,
    otherwise the user is prompted interactively.
    """
    # Number of darts that land inside the unit circle.
    inside = 0
    if len(args) < 2:
        total = int(input("Introduce the number of iterations n: "))
    else:
        total = int(args[1])

    if ENERGIA:
        pyRAPL.setup()
        # NOTE(review): this output is never attached to a Measurement, so
        # report.data is presumably empty — verify against pyRAPL usage.
        report = pyRAPL.outputs.DataFrameOutput()

    print(total)
    t0 = time.time()
    # Throw `total` darts; the index itself is unused.
    for _ in range(total):
        # Random point (x, y) in the unit square.
        x2 = r.random() ** 2
        y2 = r.random() ** 2
        # Count hits inside the unit circle.
        if x2 + y2 <= 1.0:
            inside += 1

    # inside / total approximates pi / 4 (raises ZeroDivisionError if total
    # is 0, same as the original behaviour).
    pi = (float(inside) / total) * 4
    print(pi)

    if ENERGIA:
        print("Energia consumida: ", report.data.head())
    t_final = time.time()
    print("tempo final:", t_final - t0)
import pyRAPL import pandas as pd import numpy as np from sklearn import datasets from mlfromscratch.utils import train_test_split from mlfromscratch.utils import get_random_subsets, normalize, standardize, calculate_entropy, accuracy_score from mlfromscratch.utils import mean_squared_error, calculate_variance, divide_on_feature, Plot import math import progressbar from mlfromscratch.utils.misc import bar_widgets pyRAPL.setup() csv_output = pyRAPL.outputs.CSVOutput('Hotspots_Random_forest_classifier.csv') class DecisionNode(): """Class that represents a decision node or leaf in the decision tree Parameters: ----------- feature_i: int Feature index which we want to use as the threshold measure. threshold: float The value that we will compare feature values at feature_i against to determine the prediction. value: float The class prediction if classification tree, or float value if regression tree. true_branch: DecisionNode Next decision node for samples where features value met the threshold. false_branch: DecisionNode Next decision node for samples where features value did not meet the threshold.
def setup():
    """Initialise pyRAPL and start the module-level 'global' measurement."""
    global measure
    pyRAPL.setup()
    # Bind the module-level meter before starting it, as callers expect.
    meter = pyRAPL.Measurement('global')
    measure = meter
    meter.begin()
import pyRAPL
import time

# Measure package-domain energy and report average power per socket.
pyRAPL.setup(devices=[pyRAPL.Device.PKG])
report = pyRAPL.outputs.PrintOutput()
meter = pyRAPL.Measurement('bar', output=report)
meter.begin()

# Instructions to be evaluated
time.sleep(1)

# Fixed: end() was commented out, yet meter._results is read below —
# without end() the results are never populated.
meter.end()
meter.export(report)

print('\n')
# Fixed: this value was previously assigned to a variable named `time`,
# shadowing the `time` module imported above.
duration = meter._results.duration
# Average power per socket = energy / duration.
for skt, energy in enumerate(meter._results.pkg):
    power = energy / duration
    print(f"Socket {skt}: {power} w")
def __init__(self, **config):
    """Build the CPU power-control RL environment.

    Keyword config (each falls back to the class DEF_* default):
      power          -- target power cap to reach
      socket         -- socket id used for pyRAPL measurements
      cores          -- CPU cores belonging to that socket
      maxsteps       -- maximum iterations per episode
      seed           -- RNG seed for reproducibility
      minpower       -- lower bound of the power bandwidth
      maxpower       -- upper bound of the power bandwidth
      decision_time  -- time between actions (frequency change + measure)
      measure_time   -- time spent measuring energy (defaults to decision_time)
      powstep        -- size of the observation-space power intervals
    """
    ### CPUEnv constant values.
    # POWER: power cap to reach.
    self.POWER = config.get('power', self.DEF_POWER)
    # SOCKET: socket to get pyRAPL measures.
    # CORES: CPU cores assigned to SOCKET.
    self.SOCKET = config.get('socket', self.DEF_SOCKET)
    self.CORES = config.get('cores', self.DEF_CORES)
    # MAXSTEPS: maximum iterations for environment.
    # SEED: seed for RNG reproducibility.
    self.MAXSTEPS = config.get('maxsteps', self.DEF_MAXSTEPS)
    self.SEED = config.get('seed', self.DEF_SEED)
    # MINPOWER / MAXPOWER: extremes of the power bandwidth; the bandwidth
    # must be non-empty.
    self.MINPOWER = config.get('minpower', self.DEF_MINPOWER)
    self.MAXPOWER = config.get('maxpower', self.DEF_MAXPOWER)
    assert (self.MINPOWER < self.MAXPOWER)
    # DECISION_TIME: time spent between actions (frequency change and power
    # measure). MEASURE_TIME: time spent measuring energy data.
    # SLEEP_TIME: remaining waiting time after a frequency change.
    self.DECISION_TIME = config.get('decision_time', self.DEF_DECISION)
    self.MEASURE_TIME = config.get('measure_time', self.DECISION_TIME)
    self.SLEEP_TIME = self.DECISION_TIME - self.MEASURE_TIME
    # POWERSTEP: size of intervals of observation space.
    # POWERPOINTS: extrema of power intervals.
    # INTERVALS: list of power intervals (INTERVALS depends on POWERPOINTS,
    # so the order of these calls matters).
    self.POWERSTEP = config.get('powstep', self.DEF_POWERSTEP)
    self.POWERPOINTS = self.get_powerpoints(self.POWERSTEP)
    self.INTERVALS = self.get_intervals(self.POWERPOINTS)

    ### Default gym metadata.
    self.metadata = {'render.modes': ['human']}

    ### Frequency control.
    # _cpu: cpufreq class control.
    # _frequencies: list of available frequencies in ascending order; the
    # last (highest) one is dropped — presumably to exclude turbo; confirm.
    # _freqpos: position of current frequency (-1 = not set yet).
    self._cpu = cpufreq.cpuFreq()
    self._frequencies = sorted(self._cpu.available_frequencies)[:-1]
    self._freqpos = -1
    # Set used cores to the 'userspace' governor so frequencies can be
    # modified directly.
    self._cpu.set_governors('userspace', self.CORES)

    ### Power measurement over the package domain of the chosen socket.
    pyRAPL.setup(devices=[pyRAPL.Device.PKG], socket_ids=[self.SOCKET])

    ### Action space:
    # 0: hold frequency, 1: lower frequency, 2: raise frequency.
    self.action_space = gym.spaces.Discrete(3)
    self.HOLD_FREQ = 0
    self.LOWER_FREQ = 1
    self.RAISE_FREQ = 2

    ### Action rewards (see 'get_reward()'):
    # REWARD_CLOSER: given when action approaches goal.
    # REWARD_FARTHER: given when action gets farther from goal.
    # REWARD_GOAL: given when action gets to goal state.
    self.REWARD_CLOSER = +1
    self.REWARD_FARTHER = -1
    self.REWARD_GOAL = +2

    ### Observation space:
    # Interval partition of the power range of the CPU; intervals have the
    # shape (power_i, power_i+1].
    self.observation_space = gym.spaces.Discrete(len(self.INTERVALS) + 1)
    # _power: current power consumption.
    # _state: interval of current power consumption.
    # _goal: interval containing the target POWER.
    self._power = 0.0
    self._state = 0
    self._goal = self.get_state(self.POWER)

    ### Random number generator (seeded for reproducibility).
    self.RNG = None
    self.seed(self.SEED)

    ### General environment variables.
    # _reward: accumulated environment reward.
    # _done: boolean value to indicate if goal or max steps were reached.
    # _info: dict for auxiliary debug values.
    # _count: counts the number of steps taken during environment action.
    self._reward = None
    self._acc_reward = None
    self._done = None
    self._info = None
    self._count = None
    # reset() performs the first measurement and initialises the state, so
    # it must run after the RNG and pyRAPL are set up.
    self.reset()