async def monitor(self, print_metric_log: bool = False):
        """Attach ``perf stat`` to the benchmark process and publish one metric record per interval.

        Each record holds the configured perf event values plus three derived
        columns: ``wall_cycles`` (TSC delta over the interval), ``llc_size``
        (LLC occupancy read via ``read_resctrl``) and ``local_mem`` /
        ``remote_mem`` (per-interval deltas of the resctrl memory counters).
        Records are sent to RabbitMQ, appended to the CSV file and,
        optionally, echoed to the console.

        :param print_metric_log: if True, also echo each record via a StreamHandler.
        """
        logger = logging.getLogger(self._identifier)

        try:
            # launching perf
            # -x ',' selects CSV output and -I prints counters every
            # <interval> ms; perf writes these records to stderr, parsed below.
            self._perf = await asyncio.create_subprocess_exec(
                'perf',
                'stat',
                '-e',
                self._perf_config.event_str,
                '-p',
                str(self._bench_driver.pid),
                '-x',
                ',',
                '-I',
                str(self._perf_config.interval),
                stderr=asyncio.subprocess.PIPE)

            # setup for metric logger

            # Handler that publishes every record to RabbitMQ, formatted
            # according to the configured event names.
            rabbit_mq_handler = RabbitMQHandler(self._rabbit_mq_config,
                                                self._bench_driver.name,
                                                self._bench_driver.wl_type,
                                                self._bench_driver.pid,
                                                self._perf.pid,
                                                self._perf_config.interval)
            rabbit_mq_handler.setFormatter(
                RabbitMQFormatter(self._perf_config.event_names))

            metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')
            metric_logger.addHandler(rabbit_mq_handler)

            if print_metric_log:
                metric_logger.addHandler(logging.StreamHandler())

            # Truncate the CSV file and write the header row before attaching
            # a FileHandler, which appends the data rows (default mode 'a').
            with self._perf_csv.open('w') as fp:
                # print csv header
                fp.write(','.join(
                    chain(self._perf_config.event_names,
                          ('wall_cycles', 'llc_size', 'local_mem',
                           'remote_mem'))) + '\n')
            metric_logger.addHandler(logging.FileHandler(self._perf_csv))

            # perf polling loop

            num_of_events = len(self._perf_config.events)
            # perf older than 4.17 emits an extra warning line on stderr when
            # the interval is below 100 ms; consume it so the record parser
            # below never sees it.
            if self._perf_config.interval < 100:
                proc = await asyncio.create_subprocess_exec(
                    'perf',
                    '--version',
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.DEVNULL)
                version_line, _ = await proc.communicate()
                version_str = version_line.decode().split()[2]  # type: str
                major, minor = map(
                    int,
                    version_str.split('.')[:2])  # type: int, int
                if (major, minor) < (4, 17):
                    # remove warning message of perf from buffer
                    await self._perf.stderr.readline()

            # Baselines for the derived (delta) columns of each record.
            prev_tsc = rdtsc.get_cycles()
            _, prev_local_mem, prev_total_mem = await self._bench_driver.read_resctrl(
            )
            while self._bench_driver.is_running and self._perf.returncode is None:
                record = []
                ignore_flag = False

                # One stderr line per configured event makes up a full record.
                for _ in range(num_of_events):
                    raw_line = await self._perf.stderr.readline()

                    line = raw_line.decode().strip()
                    try:
                        # The counter value is expected in the second CSV
                        # field (the first one is the interval timestamp).
                        value = line.split(',')[1]
                        float(value)
                        record.append(value)
                    except (IndexError, ValueError) as e:
                        # A malformed/non-numeric line invalidates this whole
                        # record, but the line is still consumed from stderr.
                        ignore_flag = True
                        logger.debug(
                            f'a line that perf printed was ignored due to following exception : {e}'
                            f' and the line is : {line}')

                # wall_cycles: TSC delta since the previous record.
                tmp = rdtsc.get_cycles()
                record.append(str(tmp - prev_tsc))
                prev_tsc = tmp

                llc_occupancy, local_mem, total_mem = await self._bench_driver.read_resctrl(
                )
                # llc_size is recorded as an absolute occupancy value.
                record.append(str(llc_occupancy))

                # local_mem is the per-interval delta of the local counter.
                cur_local_mem = local_mem - prev_local_mem
                record.append(str(cur_local_mem))
                prev_local_mem = local_mem

                # remote_mem = total delta minus local delta, clamped at zero.
                record.append(
                    str(max(total_mem - prev_total_mem - cur_local_mem, 0)))
                prev_total_mem = total_mem

                # Publish only complete records; partial ones are dropped.
                if not ignore_flag:
                    metric_logger.info(','.join(record))

            logger.info('end of monitoring loop')

            self._kill_perf()

        except CancelledError as e:
            logger.debug(f'The task cancelled : {e}')
            self._stop()

        finally:
            # Kill perf and the benchmark defensively; either may already be
            # gone (killed above or exited on its own), so ignore that case.
            try:
                self._kill_perf()
                self._bench_driver.stop()
            except (psutil.NoSuchProcess, ProcessLookupError):
                pass

            await self._bench_driver.cleanup()
            logger.info('The benchmark is ended.')
            self._remove_logger_handlers()
            self._end_time = time.time()
Exemple #2
0
"""cSHAKE test script
    Author: Ran Pang
    
    Test vectors are from NIST: http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/cSHAKE_samples.pdf
"""

from cSHAKE import *
import rdtsc

start1 = rdtsc.get_cycles()
sample1 = cSHAKE128(unhexlify('00010203'), 256, '', 'Email Signature')
stop1 = rdtsc.get_cycles()
if sample1 == 'c1c36925b6409a04f1b504fcbca9d82b4017277cb5ed2b2065fc1d3814d5aaf5':
    print 'Sample1 correct!'
else:
    print 'Sample1 incorrect!'

start2 = rdtsc.get_cycles()
sample2 = cSHAKE128(unhexlify('000102030405060708090a0b0c0d0E0f101112131415161718191a1b1c1d1E1f202122232425262728292a2b2c2d2E2f303132333435363738393a3b3c3d3E3f404142434445464748494a4b4c4d4E4f505152535455565758595a5b5c5d5E5f606162636465666768696a6b6c6d6E6f707172737475767778797a7b7c7d7E7f808182838485868788898a8b8c8d8E8f909192939495969798999a9b9c9d9E9fa0a1a2a3a4a5a6a7a8a9aaabacadaEafb0b1b2b3b4b5b6b7b8b9babbbcbdbEbfc0c1c2c3c4c5c6c7'), 256, '', 'Email Signature')
stop2 = rdtsc.get_cycles()
if sample2 == 'c5221d50e4f822d96a2e8881a961420f294b7b24fe3d2094baed2c6524cc166b':
    print 'Sample2 correct!'
else:
    print 'Sample2 incorrect!'

start3 = rdtsc.get_cycles()
sample3 = cSHAKE128(unhexlify(''), 256, '', '')
stop3 = rdtsc.get_cycles()
if sample3 == '7f9c2ba4e88f827d616045507605853ed73b8093f6efbc88eb1a6eacfa66ef26':
    print 'Sample3 correct!'
else:
Exemple #3
0
    async def on_init(self, context: Context) -> None:
        """Run the parent initialization, then record a baseline TSC reading.

        :param context: initialization context forwarded to the base class.
        """
        await super().on_init(context)

        # Baseline cycle counter; later readings are compared against this.
        self._prev_data = rdtsc.get_cycles()
Exemple #4
0
 async def monitor_once(self, context: Context) -> int:
     """Return the current raw TSC cycle count as this probe's sample."""
     return rdtsc.get_cycles()
Exemple #5
0
    def __init__(self, interval: int) -> None:
        """Set up the monitor state.

        :param interval: sampling period, forwarded to the base class.
        """
        super().__init__(interval)

        # Seed the previous TSC reading so the first delta is well defined.
        self._prev_data = rdtsc.get_cycles()
        # Stop flag -- presumably toggled by code outside this snippet; verify.
        self._is_stopped = False
Exemple #6
0
# https://github.com/Roguelazer/rdtsc
# Demo: repeatedly sample the TSC back-to-back and print the deltas between
# consecutive readings (the first delta is always 0 by construction).
from time import sleep
from rdtsc import get_cycles

while True:
    samples = [get_cycles() for _ in range(10)]
    # Pair each sample with its predecessor; the first pairs with itself.
    shifted = [samples[0]] + samples
    for current, previous in zip(samples, shifted):
        print(current, current - previous)
    print('-' * 15)
    sleep(0.001)  # yield the CPU so the loop doesn't spin at 100%
Exemple #7
0
"""Benchmark the cycle cost of executing a Python source file.

Parses the target file into an AST, compiles it once, then executes the
compiled code `iterations` times, timing each run with the TSC, and prints
summary statistics.
"""
# NOTE(review): this snippet used argparse/ast/rdtsc without importing them
# (only numpy was imported); the missing imports are added here.
import argparse
import ast

import numpy as np
import rdtsc

parser = argparse.ArgumentParser(
    description='Benchmarking cycles for a program')
parser.add_argument('-f',
                    '--file',
                    dest='filename',
                    type=str,
                    help='File to benchmark')
parser.add_argument('-i',
                    '--iterations',
                    dest='iterations',
                    type=int,
                    help='Number of times to run compiled code')
args = parser.parse_args()

with open(args.filename, 'r') as f:
    tree = ast.parse(f.read())

compiled = compile(tree, filename="<ast>", mode="exec")

# NOTE(review): exec() runs arbitrary code from the given file -- only point
# this tool at trusted sources.
cycles = []
for i in range(args.iterations):
    start = rdtsc.get_cycles()
    exec(compiled)
    end = rdtsc.get_cycles()
    cycles.append(end - start)

cycles = np.array(cycles)
print(
    f'For {args.iterations} cycles\nMean:\t{np.mean(cycles)}\tMax:\t{np.max(cycles)}\tMin:\t{np.min(cycles)}\tStd:\t{np.std(cycles)}\tVar:\t{np.var(cycles)}'
)
Exemple #8
0
def test_basic():
    """Two back-to-back TSC reads yield ints, and the second is larger."""
    start = rdtsc.get_cycles()
    end = rdtsc.get_cycles()
    # NOTE(review): plain `int` replaces `six.integer_types` -- on Python 3
    # they are equivalent, and `six` was never imported in this snippet.
    assert isinstance(end, int)
    assert isinstance(start, int)
    assert end > start
Exemple #9
0
                                     activation=activation,
                                     input_shape=(self.w_in, ))


class NotFoundError(ValueError):
    """Raised when a requested item cannot be located; a ValueError variant."""


# Layer kinds for which a separate cost model is kept.
NAMES = ('conv', 'pool', 'fc')
# One regression model per layer kind (LinearRegression is imported/defined
# elsewhere in this file -- presumably sklearn; verify against the imports).
validators = {name: LinearRegression() for name in NAMES}

# CPU-clock id of the current thread, consumed by get_time1 below.
# NOTE: time.pthread_getcpuclockid is POSIX-only.
cid = time.pthread_getcpuclockid(threading.get_ident())

# different possible function for benchmarking:
# get_time1 -> per-thread CPU time in nanoseconds; get_time2 -> raw TSC cycles.
get_time1 = lambda: time.clock_gettime_ns(cid)
get_time2 = lambda: rdtsc.get_cycles()

# =========================== SEARCH SPACE =====================================

# Candidate hyper-parameter values for convolution/pooling layers.
POSSIBLE_FILTER_WIDTHS = [3, 5, 7, 9, 11, 13, 15, 17]
POSSIBLE_STRIDES = [2, 3, 4]
POSSIBLE_FILTERS_AMOUNT = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]
POSSIBLE_PADDINGS = ['same', 'valid']

# Candidate unit counts for fully-connected layers.
POSSIBLE_UNITS = [8, 16, 32, 64, 128, 256, 512]

# ==============================================================================


# [y_1, y_2, y_3 ... y_p] - number of LLC misses on i-th probe
def get_probes(n: int, p: int, model: tf.keras.models.Model) -> np.ndarray: