def __init__(self, psutil, rpyc=None):
    """Bind a ``psutil`` module (local or rpyc-remoted) and capture the
    initial CPU-times snapshot used for delta measurements.

    :param psutil: the ``psutil`` module to measure with (may be a
        remote module proxy when used over ``rpyc``)
    :param rpyc: optional ``rpyc`` module/connection factory for
        measuring a remote host; ``None`` means measure locally
    """
    self._psutil = psutil
    self.rpyc = rpyc
    self._conn = None
    # FIX: the original assigned `self.log` twice — first with
    # `utils.get_logger(__name__)` and then, with no logging in
    # between, overwrote it with the class-named logger below.  The
    # dead first assignment has been removed.
    self.log = utils.get_logger(type(self).__name__)
    # required to define the columns for the data frame storer
    self.fields = [
        'call_index',
        'total_cpu_percent',
        'percent_cpu_sys',
        'percent_cpu_usr',
        'percent_cpu_idle',
        'percent_cpu_iow',
        'phymem_percent_usage',
        'load_avg',
    ]
    # this call should ensure we have the correct (namedtuple) type
    self._times_tup_type = psutil.cpu_times().__class__
    # initial cpu usage
    # NOTE(review): `self.psutil` (no underscore) is presumably a
    # property defined elsewhere on this class (e.g. resolving the
    # local vs. rpyc-remote module) — only `self._psutil` is assigned
    # here; confirm the property exists.
    self._last_cpu_times = self.psutil.cpu_times()
def __init__(self, name, dtype, buf_size=2**10, path=None, storetype=None):
    """Set up buffered measurement storage backed by a background
    writer subprocess.

    :param name: label used for the writer process name
    :param dtype: numpy-style dtype (or field-name sequence) describing
        the row layout; a bare sequence of names is coerced to all
        ``float64`` columns
    :param buf_size: ring-buffer capacity before rows are flushed to
        the on-disk store (exposed purely for testing)
    :param path: optional explicit storage file path; a temp file is
        created otherwise
    :param storetype: store class to use for disk persistence
        (defaults to ``CSVStore``)
    """
    self.name = name
    try:
        self.dtype = pd.np.dtype(dtype) if pd else dtype
    except TypeError:
        # a plain sequence of field names was passed:
        # set all columns to float64
        self.dtype = pd.np.dtype(
            list(zip(dtype, itertools.repeat('float64')))
        )
    self.log = utils.get_logger(type(self).__name__)
    # allocated a shared mem np structured array
    # (no buffering at all when pandas/numpy is unavailable)
    self._buf_size = buf_size  # purely for testing
    self._buffer = RingBuffer(
        dtype=self.dtype, size=buf_size) if pd else None
    # parent proc read-only access to disk store
    self.storetype = storetype or CSVStore
    self._storepath = path or tmpfile(self.storetype.ext)
    self.store = self.storetype(self._storepath, dtypes=self.dtype)
    self.queue = mp.Queue()
    self._iput = 0  # queue put counter
    # disable SIGINT while we spawn so the child does not inherit a
    # keyboard-interrupt handler (parent restores its own below)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # setup bg writer
    self._writer = mp.Process(
        target=_consume_and_write,
        args=(
            self.queue, self._storepath, self.store, self._buffer),
        name='{}_frame_writer'.format(self.name),
    )
    self._writer.start()
    # re-enable SIGINT
    signal.signal(signal.SIGINT, signal.default_int_handler)
    # kill subproc on exit
    atexit.register(self.stopwriter)
    # ensure writer is initialized: the child puts the storage path on
    # the queue once its output file exists (see _consume_and_write)
    # NOTE(review): `assert` is stripped under ``python -O``; consider
    # raising explicitly if this handshake check must always run.
    path = self.queue.get(timeout=3)
    assert path == self._storepath
def _consume_and_write(queue, path, store, sharr):
    """Background-process writer loop: insert each row received from
    ``queue`` into the shared memory array at the current index and
    increment. Empty rows are always written to disk (keeps stores
    'call-index-aligned').

    Consumes until the ``Terminate`` sentinel is received.  When numpy
    /pandas are unavailable ``sharr`` is ``None`` and rows go straight
    to the disk store instead of through the ring buffer.
    """
    proc = mp.current_process()
    slog = utils.get_logger(proc.name)
    # mirror the parent logger's level onto multiprocessing's
    # stderr logger for this child
    log = mp.log_to_stderr(slog.getEffectiveLevel())
    log.debug("starting storage writer '{}'".format(proc.name))
    log.info("storage path is '{}'".format(path))
    log.debug("sharr is '{}'".format(sharr))
    # set up a new store instance for writing
    # (deliberately rebinds the `store` name to the writer handle)
    with store.writer(path, dtypes=store.dtypes) as store:
        # notify parent that file has been created
        queue.put(path)
        # handle no pandas/np case
        buff = store if sharr is None else sharr
        bufftype = type(buff)
        log.debug('buffer type is {}'.format(bufftype))
        for row in iter(queue.get, Terminate):  # consume and process
            now = time.time()
            # write frame to disk on buffer fill
            if sharr and sharr.is_full():
                log.debug('writing to {} storage...'.format(store.ext))
                try:
                    # push a data frame
                    store.put(pd.DataFrame.from_records(buff.read()))
                except ValueError:
                    log.error(traceback.format_exc())
                log.debug("storage put took '{}'".format(
                    time.time() - now))
            try:
                # push to ring buffer (or store if no pd)
                buff.put(row)
                log.debug("{} insert took '{}'".format(
                    bufftype, time.time() - now))
            except ValueError:
                log.error(traceback.format_exc())
    log.debug("terminating frame writer '{}'".format(proc.name))
def __init__(self):
    """Set up a module-level logger for this instance."""
    self.log = utils.get_logger(__name__)
def __init__(self, listener=None, array=None, pool=None):
    """Bind an optional event listener and measurement buffer.

    :param listener: optional event listener; held only via a weak
        proxy so this object does not keep it alive
    :param array: optional pre-allocated buffer; a fresh default numpy
        array is allocated when ``None``
    :param pool: optional worker pool; falls back to the listener proxy
    """
    self.listener = weakref.proxy(listener) if listener else None
    # FIX: the original used `array if array else new_array()`.
    # Truth-testing a numpy array raises ValueError for multi-element
    # arrays ("truth value ... is ambiguous") and would silently
    # replace a deliberately-empty buffer.  Test identity with None
    # instead.
    self._array = array if array is not None else new_array()  # np default buffer
    self.log = utils.get_logger(__name__)
    self.pool = pool if pool else self.listener
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import pickle from functools import partial from collections import OrderedDict, namedtuple from switchy import utils from .storage import * # re-export(s) from .cdr import CDR log = utils.get_logger(__name__) def plot_df(df, figspec, **kwargs): """Plot a pandas data frame according to the provided `figspec` """ from .mpl_helpers import multiplot return multiplot(df, figspec=figspec, **kwargs) Measurer = namedtuple("Measurer", 'app ppkwargs storer ops') class Measurers(object): """A dict-like collection of measurement apps with sub-references to each app's `DataStorer` and optional metrics computing callables.
rates = 1. / (view['time'][1:] - self.view['time'][:-1]) # bound rate measures rates[rates > 300] = 300 return rates @property def wm_rate(self): '''The rolling average call rate windowed over 100 calls ''' return moving_avg(self.inst_rate, n=100) try: from mpl_helpers import multiplot except ImportError: log = utils.get_logger() if not log.handlers: utils.log_to_stderr() log.warn( "Matplotlib must be installed for graphing support" ) else: def plot(self, block=False): view = self.view view.sort(order='time') # sort array by time stamp self.mng, self.fig, self.artists = multiplot(view, fieldspec=[ ('time', None), # this field will not be plotted # latencies ('answer_latency', (1, 1)), ('call_setup_latency', (1, 1)), ('invite_latency', (1, 1)),
def __init__(self):
    """Initialize the per-call counter and this instance's logger."""
    self._call_counter = itertools.count(0)
    self.log = utils.get_logger(__name__)
rates = 1. / (view['time'][1:] - self.view['time'][:-1]) # bound rate measures rates[rates > 300] = 300 return rates @property def wm_rate(self): '''The rolling average call rate windowed over 100 calls ''' return moving_avg(self.inst_rate, n=100) try: from mpl_helpers import multiplot except ImportError: log = utils.get_logger() if not log.handlers: utils.log_to_stderr() log.warn("Matplotlib must be installed for graphing support") else: def plot(self, block=False): view = self.view view.sort(order='time') # sort array by time stamp self.mng, self.fig, self.artists = multiplot( view, fieldspec=[ ('time', None), # this field will not be plotted # latencies ('answer_latency', (1, 1)), ('call_setup_latency', (1, 1)),