def _makebundlefile(part):
    """constructs a temporary bundle file

    part.data should be an uncompressed v1 changegroup"""

    fp = None
    fd, bundlefile = tempfile.mkstemp()
    try:  # guards bundlefile
        try:  # guards fp
            fp = os.fdopen(fd, 'wb')
            magic = 'HG10UN'
            fp.write(magic)
            data = part.read(resource.getpagesize() - len(magic))
            while data:
                fp.write(data)
                data = part.read(resource.getpagesize())
        finally:
            fp.close()
    except:
        try:
            os.unlink(bundlefile)
        except:
            # we would rather see the original exception
            pass
        raise

    return bundlefile

def periodicCheck(_reactor=reactor):
    # Measure how much garbage we have
    garbage_count = len(gc.garbage)
    MetricCountEvent.log('gc.garbage', garbage_count, absolute=True)
    if garbage_count == 0:
        level = ALARM_OK
    else:
        level = ALARM_WARN
    MetricAlarmEvent.log('gc.garbage', level=level)

    if resource:
        r = resource.getrusage(resource.RUSAGE_SELF)
        attrs = ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_ixrss', 'ru_idrss',
                 'ru_isrss', 'ru_minflt', 'ru_majflt', 'ru_nswap',
                 'ru_inblock', 'ru_oublock', 'ru_msgsnd', 'ru_msgrcv',
                 'ru_nsignals', 'ru_nvcsw', 'ru_nivcsw']
        for i, a in enumerate(attrs):
            # Linux versions prior to 2.6.32 didn't report this value, but we
            # can calculate it from /proc/<pid>/statm
            v = r[i]
            if a == 'ru_maxrss' and v == 0:
                v = _get_rss() * resource.getpagesize() / 1024
            MetricCountEvent.log('resource.%s' % a, v, absolute=True)
        MetricCountEvent.log('resource.pagesize', resource.getpagesize(), absolute=True)

    # Measure the reactor delay
    then = util.now(_reactor)
    dt = 0.1

    def cb():
        now = util.now(_reactor)
        delay = (now - then) - dt
        MetricTimeEvent.log("reactorDelay", delay)
    _reactor.callLater(dt, cb)

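# _get_rss() is referenced above but not shown in this snippet. A plausible
# sketch (an assumption, not the original helper): read the resident-set
# field of /proc/<pid>/statm, which is reported in pages.
def _get_rss():
    try:
        with open("/proc/%d/statm" % os.getpid()) as f:
            return int(f.read().split()[1])  # resident size, in pages
    except (IOError, OSError):
        return 0
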
def native_map_io_space(self, base, size, unused_cache_type):
    """Map to memory a specific region."""
    if self.devmem_available() and not self.memory_mapping(base, size):
        if logger().VERBOSE:
            logger().log("[helper] Mapping 0x%x to memory" % (base))
        length = max(size, resource.getpagesize())
        page_aligned_base = base - (base % resource.getpagesize())
        mapping = MemoryMapping(self.dev_mem, length, mmap.MAP_SHARED,
                                mmap.PROT_READ | mmap.PROT_WRITE,
                                offset=page_aligned_base)
        self.mappings.append(mapping)

def __init__(self, label=None):
    """
    @param (basestring) label  shown as [label] in log output
    """
    self.start_time = time.time()
    self.latest_time = time.time()
    self.label = label
    self._inner_log = []
    self.start_mem = float(getrusage(RUSAGE_SELF)[6] * getpagesize())
    self.latest_mem = float(getrusage(RUSAGE_SELF)[6] * getpagesize())

def periodicCheck(_reactor=reactor):
    try:
        # Measure how much garbage we have
        garbage_count = len(gc.garbage)
        MetricCountEvent.log("gc.garbage", garbage_count, absolute=True)
        if garbage_count == 0:
            level = ALARM_OK
        else:
            level = ALARM_WARN
        MetricAlarmEvent.log("gc.garbage", level=level)

        if resource:
            r = resource.getrusage(resource.RUSAGE_SELF)
            attrs = [
                "ru_utime",
                "ru_stime",
                "ru_maxrss",
                "ru_ixrss",
                "ru_idrss",
                "ru_isrss",
                "ru_minflt",
                "ru_majflt",
                "ru_nswap",
                "ru_inblock",
                "ru_oublock",
                "ru_msgsnd",
                "ru_msgrcv",
                "ru_nsignals",
                "ru_nvcsw",
                "ru_nivcsw",
            ]
            for i, a in enumerate(attrs):
                # Linux versions prior to 2.6.32 didn't report this value, but we
                # can calculate it from /proc/<pid>/statm
                v = r[i]
                if a == "ru_maxrss" and v == 0:
                    v = _get_rss() * resource.getpagesize() / 1024
                MetricCountEvent.log("resource.%s" % a, v, absolute=True)
            MetricCountEvent.log("resource.pagesize", resource.getpagesize(), absolute=True)

        # Measure the reactor delay
        then = util.now(_reactor)
        dt = 0.1

        def cb():
            now = util.now(_reactor)
            delay = (now - then) - dt
            MetricTimeEvent.log("reactorDelay", delay)

        _reactor.callLater(dt, cb)
    except Exception:
        log.err(None, "while collecting VM metrics")

def get_mem_usage_virt_and_res():
    """
    This only works on Linux, and only if the /proc/$PID/statm output is the
    same as that in linux kernel 2.6.  Also `os.getpid()' must work.
    """
    try:
        import resource
    except ImportError:
        raise NotSupportedException

    # sample output from cat /proc/$PID/statm:
    # 14317 3092 832 279 0 2108 0
    a = os.popen("cat /proc/%s/statm" % os.getpid()).read().split()
    if not len(a) > 1:
        raise NotSupportedException
    return (int(a[0]) * resource.getpagesize(),
            int(a[1]) * resource.getpagesize(),)

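# Equivalent sketch that avoids spawning `cat` via os.popen (same statm
# layout assumption as above; not from the original module):
def get_mem_usage_virt_and_res_direct():
    import resource
    with open("/proc/%s/statm" % os.getpid()) as f:
        a = f.read().split()
    if not len(a) > 1:
        raise NotSupportedException
    return (int(a[0]) * resource.getpagesize(),
            int(a[1]) * resource.getpagesize(),)
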
def _check_mlock_unevictable(self):
    """
    Check nr_mlock and nr_unevictable with guest memory
    """
    if self.realtime_mlock == "on":
        vm_pages = self.vm_mem * 1024 * 1024 / getpagesize()
        nr_mlock = self.mlock_post - self.mlock_pre
        nr_unevictable = self.unevictable_post - self.unevictable_pre
        if nr_mlock < vm_pages:
            self.test.fail("nr_mlock is not fit with VM memory"
                           " when mlock is %s!"
                           " nr_mlock = %d, vm_mem = %d."
                           % (self.realtime_mlock, nr_mlock, self.vm_mem))
        if nr_unevictable < vm_pages:
            self.test.fail("nr_unevictable is not fit with VM memory"
                           " when mlock is %s!"
                           " nr_unevictable = %d, vm_mem = %d."
                           % (self.realtime_mlock, nr_unevictable,
                              self.vm_mem))
    else:
        if self.mlock_post != self.mlock_pre:
            self.test.fail("mlock_post != mlock_pre when mlock is %s!"
                           % self.realtime_mlock)
        if self.unevictable_post != self.unevictable_pre:
            self.test.fail("unevictable_post != unevictable_pre"
                           " when mlock is %s!" % self.realtime_mlock)

def main():
    """Main Loop."""
    APPNAME = str(__package__ or __doc__)[:99].lower().strip().replace(" ", "")
    if not sys.platform.startswith("win") and sys.stderr.isatty():
        def add_color_emit_ansi(fn):
            """Add methods we need to the class."""
            def new(*args):
                """Method overload."""
                if len(args) == 2:
                    new_args = (args[0], copy(args[1]))
                else:
                    new_args = (args[0], copy(args[1]), args[2:])
                if hasattr(args[0], 'baseFilename'):
                    return fn(*args)
                levelno = new_args[1].levelno
                if levelno >= 50:
                    color = '\x1b[31;5;7m\n '  # blinking red with black
                elif levelno >= 40:
                    color = '\x1b[31m'  # red
                elif levelno >= 30:
                    color = '\x1b[33m'  # yellow
                elif levelno >= 20:
                    color = '\x1b[32m'  # green
                elif levelno >= 10:
                    color = '\x1b[35m'  # pink
                else:
                    color = '\x1b[0m'  # normal
                try:
                    new_args[1].msg = color + str(new_args[1].msg) + ' \x1b[0m'
                except Exception as reason:
                    print(reason)  # Do not use log here.
                return fn(*new_args)
            return new
        # all non-Windows platforms support ANSI Colors so we use them
        log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)
    log.basicConfig(level=-1, format="%(levelname)s:%(asctime)s %(message)s")
    log.getLogger().addHandler(log.StreamHandler(sys.stderr))
    log.info(__doc__)
    try:
        os.nice(19)  # smooth cpu priority
        libc = cdll.LoadLibrary('libc.so.6')  # set process name
        buff = create_string_buffer(len(APPNAME) + 1)
        buff.value = bytes(APPNAME.encode("utf-8"))
        libc.prctl(15, byref(buff), 0, 0, 0)
    except Exception as reason:
        log.warning(reason)
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # CTRL+C works to quit app
    app = QApplication(sys.argv)
    app.setApplicationName(APPNAME)
    app.setOrganizationName(APPNAME)
    app.setOrganizationDomain(APPNAME)
    app.instance().setQuitOnLastWindowClosed(False)  # no quit on dialog close
    icon = QIcon(app.style().standardPixmap(QStyle.SP_FileIcon))
    app.setWindowIcon(icon)
    win = MainWindow(icon)
    win.show()
    log.info('Total Maximum RAM Memory used: ~{} MegaBytes.'.format(int(
        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss *
        resource.getpagesize() / 1024 / 1024 if resource else 0)))
    sys.exit(app.exec_())

def memory_used():
    """Returns the amount of resident memory in use in MBs."""

    # FIXME Need to fill out appropriate methods here for
    # different platforms.

    # For Linux use the proc filesystem. Use 'statm' as easier
    # to parse than 'status' file.
    #
    #   /proc/[number]/statm
    #          Provides information about memory usage, measured in pages.
    #          The columns are:
    #
    #              size       total program size
    #                         (same as VmSize in /proc/[number]/status)
    #              resident   resident set size
    #                         (same as VmRSS in /proc/[number]/status)
    #              share      shared pages (from shared mappings)
    #              text       text (code)
    #              lib        library (unused in Linux 2.6)
    #              data       data + stack
    #              dt         dirty pages (unused in Linux 2.6)

    if sys.platform == 'linux2':
        pid = os.getpid()
        statm = '/proc/%d/statm' % pid

        fp = None
        try:
            fp = open(statm, 'r')
            rss_pages = float(fp.read().split()[1])
            memory_bytes = rss_pages * resource.getpagesize()
            return memory_bytes / (1024*1024)
        except Exception:
            pass
        finally:
            if fp:
                fp.close()

    # Fallback to trying to use getrusage(). The units returned
    # can differ based on platform. Assume 1024 byte blocks as
    # default.

    if 'resource' in sys.modules:
        rusage = resource.getrusage(resource.RUSAGE_SELF)
        if sys.platform == 'darwin':
            # On MacOS X, despite the manual page saying the
            # value is in kilobytes, it is actually in bytes.
            memory_bytes = float(rusage.ru_maxrss)
            return memory_bytes / (1024*1024)
        else:
            memory_kbytes = float(rusage.ru_maxrss)
            return memory_kbytes / 1024

    # Fallback to indicating no memory usage.
    return 0

def getProcRss(procId):
    # http://man7.org/linux/man-pages/man5/proc.5.html
    stats = open("/proc/%i/stat" % procId).read().split()
    mstats = open("/proc/%i/statm" % procId).read().split()
    rss1 = int(stats[23])
    rss2 = int(mstats[1])
    return rss2 * resource.getpagesize()

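# Minimal usage sketch for getProcRss() above (an addition, assuming os and
# resource are imported as in the snippet): both the stat rss field and the
# statm resident field are page counts, so either scales by getpagesize().
if __name__ == "__main__":
    rss_bytes = getProcRss(os.getpid())
    print("RSS: %.1f MiB" % (rss_bytes / float(1 << 20)))
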
def test_PAGESIZE(self):
    # pagesize is used internally to perform different calculations
    # and it's determined by using SC_PAGE_SIZE; make sure
    # getpagesize() returns the same value.
    import resource
    self.assertEqual(os.sysconf("SC_PAGE_SIZE"), resource.getpagesize())

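# Related sanity check (a sketch, not from the test suite): POSIX exposes the
# page size under two sysconf names; on platforms where both are defined they
# should agree with resource.getpagesize() as well:
#
#   os.sysconf("SC_PAGESIZE") == os.sysconf("SC_PAGE_SIZE") == resource.getpagesize()
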
def _getvmstat(self):
    """Get system virtual memory statistics."""
    vmstat_dict = {}
    ps = getpagesize()
    vm = psutil.virtual_memory()
    vmstat_dict['pages_free'] = vm.free // ps
    vmstat_dict['pages_used'] = vm.used // ps
    vmstat_dict['pages_total'] = vm.total // ps
    vmstat_dict['pages_available'] = vm.available // ps
    if hasattr(vm, 'active'):
        vmstat_dict['pages_active'] = vm.active // ps
    if hasattr(vm, 'inactive'):
        vmstat_dict['pages_inactive'] = vm.inactive // ps
    if hasattr(vm, 'wired'):
        vmstat_dict['pages_wired_down'] = vm.wired // ps
    if hasattr(vm, 'cached'):
        vmstat_dict['pages_cached'] = vm.cached // ps
    sm = psutil.swap_memory()
    vmstat_dict['ctr_pageins'] = sm.sin // ps
    vmstat_dict['ctr_pageouts'] = sm.sout // ps
    return vmstat_dict

def memoryWriteLoop(logfileLocation, interval):
    try:
        if isinstance(logfileLocation, file):
            outputFile = logfileLocation
        else:
            outputFile = open(logfileLocation, "w", 0)

        def pad(s, finalLength):
            return s + " " * (finalLength - len(s))

        def formatCols(columns):
            return "".join([pad(columns[0], 30)] + [pad(c, 15) for c in columns[1:]])

        columns = ["timestamp", "total size MB", "RSS MB", "shared MB",
                   "code MB", "stack MB", "library MB", "dirty MB"]
        print >> outputFile, formatCols(columns)

        while True:
            with open("/proc/%s/statm" % os.getpid(), "r") as f:
                data = ["%.2f" % (int(x) * resource.getpagesize() / 1024 / 1024.0)
                        for x in f.readline().split(" ")]
            print >> outputFile, formatCols([time.strftime("%Y-%m-%dT%H:%M:%S")] + data)
            time.sleep(interval)
    except:
        import traceback
        # print_exc() actually emits the traceback; format_exc() only builds
        # the string, which was being discarded here.
        traceback.print_exc()

def GetMemoryStats(self, pid):
    status = self._GetProcFileDict('/proc/%s/status' % pid)
    stats = open('/proc/%s/stat' % pid, 'r').read().split(' ')
    return {'VM': int(stats[22]),
            'VMPeak': status['VmPeak'] * 1024,
            'WorkingSetSize': int(stats[23]) * resource.getpagesize(),
            'WorkingSetSizePeak': status['VmHWM'] * 1024}

def __init__(self, config, logger, readq):
    super(Netstat, self).__init__(config, logger, readq)
    self.page_size = resource.getpagesize()
    try:
        self.sockstat = open("/proc/net/sockstat")
        self.netstat = open("/proc/net/netstat")
        self.snmp = open("/proc/net/snmp")
    except IOError:
        self.log_exception('open failed')
        self.cleanup()
        raise

    with utils.lower_privileges(self._logger):
        # Note: up until v2.6.37-rc2 most of the values were 32 bits.
        # The first value is pretty useless since it accounts for some
        # socket types but not others.  So we don't report it because it's
        # more confusing than anything else and it's not well documented
        # what type of sockets are or aren't included in this count.
        self.regexp = re.compile("sockets: used \d+\n"
                                 "TCP: inuse (?P<tcp_inuse>\d+) orphan (?P<orphans>\d+)"
                                 " tw (?P<tw_count>\d+) alloc (?P<tcp_sockets>\d+)"
                                 " mem (?P<tcp_pages>\d+)\n"
                                 "UDP: inuse (?P<udp_inuse>\d+)"
                                 # UDP memory accounting was added in v2.6.25-rc1
                                 "(?: mem (?P<udp_pages>\d+))?\n"
                                 # UDP-Lite (RFC 3828) was added in v2.6.20-rc2
                                 "(?:UDPLITE: inuse (?P<udplite_inuse>\d+)\n)?"
                                 "RAW: inuse (?P<raw_inuse>\d+)\n"
                                 "FRAG: inuse (?P<ip_frag_nqueues>\d+)"
                                 " memory (?P<ip_frag_mem>\d+)\n")

def __init__(self, engine, config_dict):
    super(PiMonitor, self).__init__(engine, config_dict)
    d = config_dict.get('PiMonitor', {})
    self.process = d.get('process', 'weewxd')
    self.max_age = weeutil.weeutil.to_int(d.get('max_age', 2592000))
    self.page_size = resource.getpagesize()

    # get the remote_url from weewx.conf, defaulting to a sane default
    # this does not work
    #self.remote_url = d.get('remote_url', 'http://localhost/test.json')
    self.remote_url = d.get('remote_url', 'http://r/t.json')

    # get the database parameters we need to function
    binding = d.get('data_binding', 'pi_binding')
    self.dbm = self.engine.db_binder.get_manager(data_binding=binding,
                                                 initialize=True)

    # be sure database matches the schema we have
    dbcol = self.dbm.connection.columnsOf(self.dbm.table_name)
    dbm_dict = weewx.manager.get_manager_dict(
        config_dict['DataBindings'], config_dict['Databases'], binding)
    picol = [x[0] for x in dbm_dict['schema']]
    if dbcol != picol:
        raise Exception('pi schema mismatch: %s != %s' % (dbcol, picol))

    self.last_ts = None
    self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)

def execute(target, directory):
    page_size = resource.getpagesize()
    rows = []
    if directory:
        for (path, dirs, files) in os.walk(target):
            for myfile in files:
                f = os.path.join(path, myfile)
                fd = file(f, 'r')
                file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
                if file_size == 0:
                    fd.close()
                    continue
                pages_cached, pages_total = ftools.fincore_ratio(fd.fileno())
                fd.close()
                rows.append([f, file_size, pages_total, pages_cached,
                             (pages_cached * page_size),
                             (float(pages_cached) / float(pages_total)) * 100.0])
    else:
        fd = file(target, 'r')
        file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
        if file_size == 0:
            fd.close()
            return None
        else:
            pages_cached, pages_total = ftools.fincore_ratio(fd.fileno())
            fd.close()
            rows.append([target, file_size, pages_total, pages_cached,
                         (pages_cached * page_size),
                         (float(pages_cached) / float(pages_total)) * 100.0])
    rows = sorted(rows, key=lambda t: t[5], reverse=True)
    return rows

def test_constant_values(self):
    """test that constants are what I expect"""
    self.assertEqual(posix_ipc.O_CREAT, os.O_CREAT)
    self.assertEqual(posix_ipc.O_EXCL, os.O_EXCL)
    self.assertEqual(posix_ipc.O_CREX, posix_ipc.O_CREAT | posix_ipc.O_EXCL)
    self.assertEqual(posix_ipc.O_TRUNC, os.O_TRUNC)

    self.assertEqual(posix_ipc.PAGE_SIZE, resource.getpagesize())

    self.assertIn(posix_ipc.SEMAPHORE_TIMEOUT_SUPPORTED, (True, False))
    self.assertIn(posix_ipc.SEMAPHORE_VALUE_SUPPORTED, (True, False))

    self.assertGreaterEqual(posix_ipc.SEMAPHORE_VALUE_MAX, 1)

    self.assertIn(posix_ipc.MESSAGE_QUEUES_SUPPORTED, (True, False))

    if posix_ipc.MESSAGE_QUEUES_SUPPORTED:
        self.assertGreaterEqual(posix_ipc.QUEUE_MESSAGES_MAX_DEFAULT, 1)
        self.assertGreaterEqual(posix_ipc.QUEUE_MESSAGE_SIZE_MAX_DEFAULT, 1)
        self.assertGreaterEqual(posix_ipc.QUEUE_PRIORITY_MAX, 0)

    if hasattr(posix_ipc, 'USER_SIGNAL_MIN'):
        self.assertGreaterEqual(posix_ipc.USER_SIGNAL_MIN, 1)
    if hasattr(posix_ipc, 'USER_SIGNAL_MAX'):
        self.assertGreaterEqual(posix_ipc.USER_SIGNAL_MAX, 1)

    self.assertTrue(isinstance(posix_ipc.VERSION, str))

def __init__(self, commands, timeout, interval, output_dir=None, monitor_dir=None):
    self.start_time = time()
    self.end_time = self.start_time + timeout if timeout else 0  # Do not time out if time_limit is 0.
    self._interval = interval
    self._rm = ResourceMonitor(output_dir, commands)
    if monitor_dir:
        # Set the file's buffering to 10MB
        self.monitor_file = open(monitor_dir + "/resource_usage.log", "w", (1024 ** 2) * 10)
        # We read the jiffie -> second conversion rate from the os; by dividing the utime
        # and stime values by this conversion rate we get the actual cpu seconds spent during this second.
        try:
            sc_clk_tck = float(sysconf(sysconf_names['SC_CLK_TCK']))
        except AttributeError:
            sc_clk_tck = 100.0
        try:
            import resource
            pagesize = resource.getpagesize()
        except:
            pagesize = 4 * 1024
        self.monitor_file.write(json.dumps({"sc_clk_tck": sc_clk_tck, 'pagesize': pagesize}) + "\n")
    else:
        self.monitor_file = None

    # Capture SIGTERM to kill all the child processes before dying
    self.stopping = False
    signal(SIGTERM, self._termTrap)

def memory_sum():
    logging.info("Getting memory all")
    dic = {}
    dic["size"] = 0
    dic["resident"] = 0
    dic["share"] = 0
    dic["text"] = 0
    dic["SUM"] = 0
    pids = []
    for pid in os.listdir("/proc"):
        if pid.isdigit():
            pids.append(pid)
    for pid in pids:
        try:
            with open("/proc/{}/statm".format(pid)) as f:
                mem = f.read().split()
                dic["size"] += int(mem[0])
                dic["resident"] += int(mem[1])
                dic["share"] += int(mem[2])
                dic["text"] += int(mem[3])
                dic["SUM"] += int(mem[0]) + int(mem[1]) + int(mem[2]) + \
                    int(mem[3])  # + int(mem[5])
        except FileNotFoundError:
            logging.error("/proc/{}/statm not found".format(pid))
            continue
    pagesize = resource.getpagesize()
    for key in dic:
        dic[key] *= pagesize / 2**20
    return dic

def cma_stats(self):
    """Get current CMA memory Stats.

    `CMA Memory Available` : Systemwide CMA memory availability.

    `CMA Memory Usage` : CMA memory used by current object.

    `Buffer Count` : Buffers allocated by current object.

    Parameters
    ----------
    None

    Returns
    -------
    dict
        Dictionary of current stats.

    """
    stats = {}
    free_pages = libxlnk.cma_pages_available()
    stats['CMA Memory Available'] = resource.getpagesize() * free_pages
    memused = 0
    for key in self.bufmap:
        memused += self.bufmap[key]
    stats['CMA Memory Usage'] = memused
    stats['Buffer Count'] = len(self.bufmap)
    return stats

def get_memusage(mmaps=True):
    ru = resource.getrusage(resource.RUSAGE_SELF)
    pgsize = resource.getpagesize()
    maxrss = (ru.ru_maxrss * pgsize / 1e6)
    #print 'shared memory size:', (ru.ru_ixrss / 1e6), 'MB'
    #print 'unshared memory size:', (ru.ru_idrss / 1e6), 'MB'
    #print 'unshared stack size:', (ru.ru_isrss / 1e6), 'MB'
    #print 'shared memory size:', ru.ru_ixrss
    #print 'unshared memory size:', ru.ru_idrss
    #print 'unshared stack size:', ru.ru_isrss
    mu = dict(maxrss=[maxrss, 'MB'])
    pid = os.getpid()
    try:
        mu.update(_read_proc_status(pid))
    except:
        pass
    # /proc/meminfo ?
    if mmaps:
        try:
            mu.update(_read_proc_maps(pid))
        except:
            pass
    return mu

def unmap(self, start, length):
    pagesize = resource.getpagesize()
    if length % pagesize != 0:
        length = ((length / pagesize) + 1) * pagesize
    print length
    prefix = None
    suffix = None
    if start > self.address:
        prefix = MMap(start - self.address)
        prefix.setAddress(self.address)
    end = start + length
    myend = self.address + self.length
    if end < myend:
        suffix = MMap(myend - end)
        suffix.setAddress(end)
    retval = []
    if prefix:
        retval.append(prefix)
    if suffix:
        retval.append(suffix)
    return retval

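# Hedged aside on the rounding above: the `(length / pagesize) + 1` formula is
# only reached when length is not already page-aligned, so it is correct here.
# A branch-free equivalent (my sketch, not from the original code):
def round_up_to_page(length, pagesize):
    # Works whether or not length is already a multiple of pagesize.
    return (length + pagesize - 1) // pagesize * pagesize
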
def get_page_size():
    global cache_page_size
    if not cache_page_size:
        cache_page_size = resource.getpagesize()
    return cache_page_size

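# The hand-rolled cache above can also be expressed with functools (a sketch,
# assuming Python 3; getpagesize() is constant for the process, so caching it
# once is safe):
import functools

@functools.lru_cache(maxsize=None)
def get_page_size_cached():
    return resource.getpagesize()
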
def mem():
    ''' Return mem usage in MB '''
    usage = resource.getrusage(resource.RUSAGE_SELF)
    val = (usage[2] * resource.getpagesize()) / 1000000.0
    return val

def _linux_physical_memory_used(filename=None):
    # For Linux we can use information from the proc filesystem. We use
    # '/proc/statm' as it is easier to parse than the '/proc/status' file.
    # The value queried from the file is in pages and is converted to bytes.
    #
    #   /proc/[number]/statm
    #          Provides information about memory usage, measured
    #          in pages. The columns are:
    #
    #              size       total program size
    #                         (same as VmSize in /proc/[number]/status)
    #              resident   resident set size
    #                         (same as VmRSS in /proc/[number]/status)
    #              share      shared pages (from shared mappings)
    #              text       text (code)
    #              lib        library (unused in Linux 2.6)
    #              data       data + stack
    #              dt         dirty pages (unused in Linux 2.6)

    filename = filename or '/proc/%d/statm' % os.getpid()

    try:
        with open(filename, 'r') as fp:
            rss_pages = float(fp.read().split()[1])
            memory_bytes = rss_pages * resource.getpagesize()
            return memory_bytes / (1024*1024)
    except Exception:
        return 0

def getmem(self):
    ### memory usage as measured in pages
    path = '/proc/%s/statm' % str(self.p.pid)
    with open(path, 'r') as f:
        statm = f.read()
    total = int(statm.split(' ')[1])  ## resident size
    return total * resource.getpagesize()

def get_socket_info(self, openr=open):
    """
    get info from /proc/net/sockstat and sockstat6

    Note: The mem value is actually kernel pages, but we return bytes
    allocated based on the system's page size.
    """
    sockstat = {}
    try:
        with openr('/proc/net/sockstat', 'r') as proc_sockstat:
            for entry in proc_sockstat:
                if entry.startswith("TCP: inuse"):
                    tcpstats = entry.split()
                    sockstat['tcp_in_use'] = int(tcpstats[2])
                    sockstat['orphan'] = int(tcpstats[4])
                    sockstat['time_wait'] = int(tcpstats[6])
                    sockstat['tcp_mem_allocated_bytes'] = \
                        int(tcpstats[10]) * getpagesize()
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
    try:
        with openr('/proc/net/sockstat6', 'r') as proc_sockstat6:
            for entry in proc_sockstat6:
                if entry.startswith("TCP6: inuse"):
                    sockstat['tcp6_in_use'] = int(entry.split()[2])
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
    return sockstat

def detectFstype(device):
    pagesize = resource.getpagesize()
    # open device
    fd = None
    try:
        try:
            fd = open(device, "r")
        except Exception, msg:
            log.debug2(msg)
            return None
        # read pagesize bytes (at least needed for swap)
        try:
            buf = fd.read(pagesize)
        except:
            # ignore message
            return None
        if len(buf) < pagesize:
            return None

        ext2magic = ext2_journal = ext2_has_journal = 0
        try:
            (ext2magic,) = struct.unpack("H", buf[1024+56:1024+56+2])
            (ext2_journal,) = struct.unpack("I", buf[1024+96:1024+96+4])
            (ext2_has_journal,) = struct.unpack("I", buf[1024+92:1024+92+4])
        except Exception, msg:
            raise Exception, msg

def main():
    block_size = resource.getpagesize()
    address = ("localhost", 8090)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(address)
    except OSError as e:
        print(str.format("[#] Client connection error: {}", e))
        sys.exit(1)

    # 70 Mb size
    data = b"foobar\n" * 10 * 1024 * 1024
    data_size = len(data)
    print(str.format("[*] Sending data size: {} bytes (or {} Mb)",
                     data_size, data_size / 1024 / 1024))
    total = 0
    try:
        while total < data_size:
            sent = sock.send(data[total:])
            print(str.format("[*] Sent {} bytes per one system call", sent))
            if sent == 0:
                raise RuntimeError(str.format("[#] Socket connection error with host {}", address))
            total += sent
    except KeyboardInterrupt:
        print("[#] Interrupted by Ctrl + C")
        print(str.format("[*] Sent {} bytes (or {} Mb)", total, total / 1024 / 1024))
    finally:
        sock.close()
        print(str.format("[*] Connection {} closed", address))

if sys.version_info[:2] >= (3, 0):
    unicode = None
    object_or_InstanceType = object

    def cmp(a, b):
        return (a > b) - (b > a)
else:
    import types
    object_or_InstanceType = types.InstanceType

# ===========================================================================

try:
    # Unix system
    import resource
    pageSize = resource.getpagesize()
except ImportError:
    # Windows system
    import mmap
    pageSize = mmap.PAGESIZE

megabyte = 1024 * 1024


def meminfo():
    pid = os.getpid()
    try:
        data = open('/proc/%d/statm' % pid).read()
    except:
        raise NotImplementedError
    data = map(int, data.split())
    size, resident, shared, trs, drs, lrs, dt = tuple(data)

import resource
from enum import IntEnum
from typing import Optional, Dict, Union, Tuple
from tempfile import NamedTemporaryFile

import gi

gi.require_version("GExiv2", "0.10")
from gi.repository import GExiv2
from PyQt5.QtGui import QImage

from raphodo.utilities import format_size_for_user
from raphodo.metadata.metadataphoto import MetaData, photo_date_time
from raphodo.metadata.metadataexiftool import MetadataExiftool
from raphodo.fileformats import FileType

vmtouch_cmd = 'vmtouch -v "{}"'
page_size = resource.getpagesize()
to_kb = page_size // 1024

JPEG_EXTENSIONS = ["jpg", "jpe", "jpeg"]


class PreviewSource(IntEnum):
    preview_1 = 0
    preview_2 = 1
    preview_3 = 2
    preview_4 = 3
    preview_5 = 4
    preview_6 = 5


all_metadata_tags = (

print "Sockets Created" s.bind((HOST, PORT)) #s2.bind((HOST, PORT2)) print "Socket Binds Complete" s.listen(1) #s2.listen(1) print "Sockets now listening" # message sizes for sending to client messageSize = 128 #buffer = np.zeros(messageSize, dtype = 'uint32') startCode = 1471741 endCode = 1741471 memDev = open("/dev/mem", "r+b") settingMem = mmap.mmap(memDev.fileno(), resource.getpagesize(), offset=SET_LOC, prot=mmap.PROT_READ) dataMem = mmap.mmap(memDev.fileno(), TOT_DAT*DAT_SIZE, offset=DAT_LOC, prot=mmap.PROT_READ) def setReferenceAmplitude(): amplitude = connectionr.recv(4) subprocess.call("./settings a %f" % (struct.unpack("f", amplitude)[0]), shell=True) def setDacMultipliers(): rawData = connectionr.recv(8) multipliers = struct.unpack("2i", rawData) subprocess.call("./settings d %d %d" % (multipliers[0], multipliers[1]), shell=True) def setSweepParameters(): rawData = connectionr.recv(12) sweepData = struct.unpack("3f", rawData) subprocess.call("./settings s %f %f %f" % (sweepData[0], sweepData[1], sweepData[2]), shell=True)
def func(self):
    "Show list."

    caller = self.caller

    # display active processes

    if not utils.host_os_is('posix'):
        string = "Process listings are only available under Linux/Unix."
        caller.msg(string)
        return

    global _resource, _idmapper
    if not _resource:
        import resource as _resource
    if not _idmapper:
        from src.utils.idmapper import base as _idmapper

    import resource
    loadavg = os.getloadavg()
    psize = _resource.getpagesize()
    pid = os.getpid()
    # resident memory
    rmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "rss")).read()) / 1000.0
    # virtual memory
    vmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "vsz")).read()) / 1000.0
    # percent of resident memory to total
    pmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "%mem")).read())
    rusage = resource.getrusage(resource.RUSAGE_SELF)

    if "mem" in self.switches:
        caller.msg("Memory usage: RMEM: {w%g{n MB (%g%%), "
                   "VMEM (res+swap+cache): {w%g{n MB." % (rmem, pmem, vmem))
        return

    if "flushmem" in self.switches:
        caller.msg("Flushed object idmapper cache. Python garbage collector "
                   "recovered memory from %i objects." % _idmapper.flush_cache())
        return

    # load table
    loadtable = prettytable.PrettyTable(["property", "statistic"])
    loadtable.align = 'l'
    loadtable.add_row(["Server load (1 min)", "%g" % loadavg[0]])
    loadtable.add_row(["Process ID", "%g" % pid])
    loadtable.add_row(["Bytes per page", "%g " % psize])
    loadtable.add_row(["CPU time used (user)",
                       "%s (%gs)" % (utils.time_format(rusage.ru_utime), rusage.ru_utime)])
    loadtable.add_row(["CPU time used (system)",
                       "%s (%gs)" % (utils.time_format(rusage.ru_stime), rusage.ru_stime)])
    loadtable.add_row(["Memory usage", "%g MB (%g%%)" % (rmem, pmem)])
    loadtable.add_row(["Virtual address space\n {x(resident+swap+caching){n", "%g MB" % vmem])
    loadtable.add_row(["Page faults", "%g hard, %g soft, %g swapouts"
                       % (rusage.ru_majflt, rusage.ru_minflt, rusage.ru_nswap)])
    loadtable.add_row(["Disk I/O", "%g reads, %g writes"
                       % (rusage.ru_inblock, rusage.ru_oublock)])
    loadtable.add_row(["Network I/O", "%g in, %g out"
                       % (rusage.ru_msgrcv, rusage.ru_msgsnd)])
    loadtable.add_row(["Context switching", "%g vol, %g forced, %g signals"
                       % (rusage.ru_nvcsw, rusage.ru_nivcsw, rusage.ru_nsignals)])

    string = "{wServer CPU and Memory load:{n\n%s" % loadtable

    if not is_pypy:
        # Cache size measurements are not available on PyPy
        # because it lacks sys.getsizeof

        # object cache size
        total_num, cachedict = _idmapper.cache_size()
        sorted_cache = sorted([(key, num) for key, num in cachedict.items() if num > 0],
                              key=lambda tup: tup[1], reverse=True)
        memtable = prettytable.PrettyTable(["entity name", "number", "idmapper %%"])
        memtable.align = 'l'
        for tup in sorted_cache:
            memtable.add_row([tup[0], "%i" % tup[1],
                              "%.2f" % (float(tup[1]) / total_num * 100)])

        # get sizes of other caches
        string += "\n{w Entity idmapper cache:{n %i items\n%s" % (total_num, memtable)

    caller.msg(string)

}

TRACEPOINT_PROBE(percpu, percpu_free_percpu) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}
"""

if kernel_trace:
    if args.percpu:
        bpf_source += bpf_source_percpu
    else:
        bpf_source += bpf_source_kernel

bpf_source = bpf_source.replace("SHOULD_PRINT", "1" if trace_all else "0")
bpf_source = bpf_source.replace("SAMPLE_EVERY_N", str(sample_every_n))
bpf_source = bpf_source.replace("PAGE_SIZE", str(resource.getpagesize()))

size_filter = ""
if min_size is not None and max_size is not None:
    size_filter = "if (size < %d || size > %d) return 0;" % \
        (min_size, max_size)
elif min_size is not None:
    size_filter = "if (size < %d) return 0;" % min_size
elif max_size is not None:
    size_filter = "if (size > %d) return 0;" % max_size
bpf_source = bpf_source.replace("SIZE_FILTER", size_filter)

stack_flags = "0"
if not kernel_trace:
    stack_flags += "|BPF_F_USER_STACK"
bpf_source = bpf_source.replace("STACK_FLAGS", stack_flags)

def __update_statusbar(self):
    while not self.__stoped:
        # ru_maxrss is reported in kilobytes on Linux, matching the KB label
        mem_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        self.statusbar.text("Running. Time: {} Memory: {} KB."
                            .format(self.chrono.time, mem_kb))

def analysis_lap(sliceno, badmap_fh, first_lap):
    known_line_count = 0
    badmap_size = 0
    badmap_fd = -1
    res_bad_count = {}
    res_default_count = {}
    res_minmax = {}
    link_candidates = []
    if first_lap:
        record_bad = options.filter_bad
        skip_bad = 0
    else:
        record_bad = 0
        skip_bad = options.filter_bad
    minmax_fn = 'minmax%d' % (sliceno,)
    dw = DatasetWriter()
    for colname, coltype in options.column2type.iteritems():
        out_fn = dw.column_filename(options.rename.get(colname, colname)).encode('ascii')
        if ':' in coltype and not coltype.startswith('number:'):
            coltype, fmt = coltype.split(':', 1)
            _, cfunc, pyfunc = dataset_typing.convfuncs[coltype + ':*']
            if '%f' in fmt:
                # needs to fall back to python version
                cfunc = None
            if not cfunc:
                pyfunc = pyfunc(coltype, fmt)
        else:
            _, cfunc, pyfunc = dataset_typing.convfuncs[coltype]
            fmt = ffi.NULL
        d = datasets.source
        assert d.columns[colname].type in ('bytes', 'ascii',), colname
        if options.filter_bad:
            line_count = d.lines[sliceno]
            if known_line_count:
                assert line_count == known_line_count, (colname, line_count, known_line_count)
            else:
                known_line_count = line_count
                pagesize = getpagesize()
                badmap_size = (line_count // 8 // pagesize + 1) * pagesize
                badmap_fh.truncate(badmap_size)
                badmap_fd = badmap_fh.fileno()
        if d.columns[colname].backing_type.startswith('_v2_'):
            backing_format = 2
        else:
            backing_format = 3
        in_fn = d.column_filename(colname, sliceno).encode('ascii')
        if d.columns[colname].offsets:
            offset = d.columns[colname].offsets[sliceno]
            max_count = d.lines[sliceno]
        else:
            offset = 0
            max_count = -1
        if coltype == 'number':
            cfunc = True
        if coltype == 'number:int':
            coltype = 'number'
            cfunc = True
            fmt = "int"
        if cfunc:
            default_value = options.defaults.get(colname, ffi.NULL)
            if default_value is None:
                default_value = ffi.NULL
                default_value_is_None = True
            else:
                default_value_is_None = False
            bad_count = ffi.new('uint64_t [1]', [0])
            default_count = ffi.new('uint64_t [1]', [0])
            c = getattr(backend, 'convert_column_' + coltype)
            res = c(in_fn, out_fn, minmax_fn, default_value, default_value_is_None,
                    fmt, record_bad, skip_bad, badmap_fd, badmap_size,
                    bad_count, default_count, offset, max_count, backing_format)
            assert not res, 'Failed to convert ' + colname
            res_bad_count[colname] = bad_count[0]
            res_default_count[colname] = default_count[0]
            with type2iter[dataset_typing.typerename.get(coltype, coltype)](minmax_fn) as it:
                res_minmax[colname] = list(it)
            unlink(minmax_fn)
        elif pyfunc is str:
            # We skip it the first time around, and link it from
            # the source dataset if there were no bad lines.
            # (That happens at the end of analysis.)
            # We can't do that if the file is not slice-specific though.
            # And we also can't do it if the column is in the wrong (old) format.
            if skip_bad or '%s' not in d.column_filename(colname, '%s') or backing_format != 3:
                res = backend.filter_strings(in_fn, out_fn, badmap_fd, badmap_size,
                                             offset, max_count, backing_format)
                assert not res, 'Failed to convert ' + colname
            else:
                link_candidates.append((in_fn, out_fn,))
            res_bad_count[colname] = 0
            res_default_count[colname] = 0
        elif pyfunc is str.strip:
            res = backend.filter_stringstrip(in_fn, out_fn, badmap_fd, badmap_size,
                                             offset, max_count, backing_format)
            assert not res, 'Failed to convert ' + colname
            res_bad_count[colname] = 0
            res_default_count[colname] = 0
        else:
            # python func
            nodefault = object()
            if colname in options.defaults:
                if options.defaults[colname] is None:
                    default_value = None
                else:
                    default_value = pyfunc(options.defaults[colname])
            else:
                default_value = nodefault
            if options.filter_bad:
                badmap = mmap(badmap_fd, badmap_size)
            bad_count = 0
            default_count = 0
            with typed_writer(dataset_typing.typerename.get(coltype, coltype))(out_fn) as fh:
                col_min = col_max = None
                for ix, v in enumerate(d.iterate(sliceno, colname)):
                    if skip_bad:
                        if ord(badmap[ix // 8]) & (1 << (ix % 8)):
                            bad_count += 1
                            continue
                    try:
                        v = pyfunc(v)
                    except ValueError:
                        if default_value is not nodefault:
                            v = default_value
                            default_count += 1
                        elif record_bad:
                            bad_count += 1
                            bv = ord(badmap[ix // 8])
                            badmap[ix // 8] = chr(bv | (1 << (ix % 8)))
                            continue
                        else:
                            raise Exception("Invalid value %r with no default in %s" % (v, colname,))
                    if not isinstance(v, (NoneType, str, unicode,)):
                        if col_min is None:
                            col_min = col_max = v
                        if v < col_min:
                            col_min = v
                        if v > col_max:
                            col_max = v
                    fh.write(v)
            if options.filter_bad:
                badmap.close()
            res_bad_count[colname] = bad_count
            res_default_count[colname] = default_count
            res_minmax[colname] = [col_min, col_max]
    return res_bad_count, res_default_count, res_minmax, link_candidates

def _get_mem_usage(self):
    from resource import getpagesize, getrusage, RUSAGE_SELF
    mem = getrusage(RUSAGE_SELF).ru_maxrss
    return mem * getpagesize() / 1024 / 1024

def elapsed_mem_lap(self):
    r = float(getrusage(RUSAGE_SELF)[6] * getpagesize()) - self.latest_mem
    self.latest_mem = float(getrusage(RUSAGE_SELF)[6] * getpagesize())
    return r

class SerfConnection:
    """
    Manages RPC communication to and from a Serf agent.

    This is an internal class; see :class:`asyncserf.Serf` for methods
    you're supposed to call. ;-)
    """

    # pylint: disable=too-many-instance-attributes

    # Read from the RPC socket in blocks of this many bytes.
    # (Typically 4k)
    _socket_recv_size = resource.getpagesize()

    _conn_id = 0

    def __init__(self, tg, host="localhost", port=7373):
        type(self)._conn_id += 1
        self._conn_id = type(self)._conn_id
        self.tg = tg
        self.host = host
        self.port = port
        self._socket = None
        self._seq = 0
        self._handlers = {}
        self._send_lock = anyio.create_lock()
        # handler protocol: incoming messages are passed in using `.set`.
        # If .expect_body is True then the reader will add a body to the
        # request. If it's -1 then the first reply is the body-less OK (we
        # hope) and only subsequent replies will have a body.

    def __repr__(self):
        return "<%(class)s counter=%(c)s host=%(h)s port=%(p)s>" % {
            "class": self.__class__.__name__,
            "c": self._seq,
            "h": self.host,
            "p": self.port,
        }

    def stream(self, command, params=None, expect_body=True):
        """
        Sends the provided command to Serf for evaluation, with any
        parameters as the message body. Expect a streamed reply.

        Returns a ``_StreamReply`` object which affords an async context
        manager plus async iterator, which will return replies.
        """
        return _StreamReply(self, command, params, self._counter, expect_body)

    async def _call(self, command, params=None, expect_body=True, *, _reply=None):
        """
        Sends the provided command to Serf for evaluation, with any
        parameters as the message body.

        Returns the reply object. If the connection is being torn down and
        no reply is expected, return ``None``.
        """
        # pylint: disable=protected-access ## owch

        class SingleReply(ValueEvent):
            # pylint: disable=protected-access,no-self-argument
            """
            A helper class, used to process a single reply.
            """

            def __init__(slf, seq, expect_body):
                super().__init__()
                slf.seq = seq
                slf.expect_body = expect_body

            async def set(slf, val):  # pylint: disable=arguments-differ
                if self._handlers is not None:
                    del self._handlers[slf.seq]
                await super().set(val)

            async def set_error(slf, err):  # pylint: disable=arguments-differ
                if self._handlers is not None:
                    del self._handlers[slf.seq]
                await super().set_error(err)

        if self._socket is None:
            return
        if _reply is None:
            seq = self._counter
            _reply = SingleReply(seq, expect_body)
        else:
            seq = _reply.seq
        if self._handlers is not None:
            self._handlers[seq] = _reply
        else:
            _reply = None

        if params:
            logger.debug("%d:Send %s:%s =%s", self._conn_id, seq, command, repr(params))
        else:
            logger.debug("%d:Send %s:%s", self._conn_id, seq, command)
        msg = msgpack.packb({"Seq": seq, "Command": command})
        if params is not None:
            msg += msgpack.packb(params)

        async with self._send_lock:  # pylint: disable=not-async-context-manager ## owch
            if self._socket is None:
                raise anyio.ClosedResourceError()
            await self._socket.send(msg)

        return _reply

    async def call(self, command, params=None, expect_body=True):
        """
        Fire a single-reply command, wait for the reply (and return it).
        """
        res = await self._call(command, params, expect_body=expect_body)
        if res is None:
            return res
        return await res.get()

    async def _handle_msg(self, msg):
        """Handle an incoming message.

        Return True if the message is incomplete, i.e. the reader should
        wait for a body, attach it to the message, and then call this
        method again.
""" if self._handlers is None: logger.warning("Reader terminated:%s", msg) return try: seq = msg.head[b"Seq"] except KeyError: raise RuntimeError( # pylint:disable=raise-missing-from "Reader got out of sync: " + str(msg)) try: hdl = self._handlers[seq] except KeyError: logger.warning("Spurious message %s: %s", seq, msg) return if (msg.body is None and hdl.expect_body > 0 and (hdl.expect_body > 1 or not msg.head[b"Error"])): return True # Do this here because stream replies might arrive immediately # i.e. before the queue listener gets around to us if hdl.expect_body < 0: hdl.expect_body = -hdl.expect_body if msg.head[b"Error"]: await hdl.set_error(SerfError(msg)) await anyio.sleep(0.01) else: await hdl.set(msg) return False async def _reader(self, *, result: ValueEvent = None): """Main loop for reading TODO: add a timeout for receiving message bodies. """ unpacker = msgpack.Unpacker(object_hook=self._decode_addr_key) cur_msg = None async with anyio.open_cancel_scope(shield=True) as s: if result is not None: await result.set(s) try: while self._socket is not None: if cur_msg is not None: logger.debug("%d:wait for body", self._conn_id) try: async with anyio.fail_after( 5 if cur_msg else math.inf): buf = await self._socket.receive( self._socket_recv_size) except TimeoutError: seq = cur_msg.head.get(b"Seq", None) hdl = self._handlers.get(seq, None) if hdl is not None: await hdl.set_error(SerfTimeout(cur_msg)) else: raise SerfTimeout(cur_msg) from None except anyio.ClosedResourceError: return # closed by us except OSError as err: if err.errno == errno.EBADF: return raise if len(buf) == 0: # Connection was closed. raise SerfClosedError("Connection closed by peer") unpacker.feed(buf) for msg in unpacker: if cur_msg is not None: logger.debug("%d::Body=%s", self._conn_id, msg) cur_msg.body = msg await self._handle_msg(cur_msg) cur_msg = None else: logger.debug("%d:Recv =%s", self._conn_id, msg) msg = SerfResult(msg) if await self._handle_msg(msg): cur_msg = msg finally: hdl, self._handlers = self._handlers, None async with anyio.open_cancel_scope(shield=True): for m in hdl.values(): await m.cancel() async def handshake(self): """ Sets up the connection with the Serf agent and does the initial handshake. """ return await self.call("handshake", {"Version": 1}, expect_body=False) async def auth(self, auth_key): """ Performs the initial authentication on connect """ return await self.call("auth", {"AuthKey": auth_key}, expect_body=False) @asynccontextmanager async def _connected(self): """ This async context manager handles the actual TCP connection to the Serf process. """ reader = None try: async with await anyio.connect_tcp(self.host, self.port) as sock: self._socket = sock reader = await self.tg.spawn(self._reader) yield self except socket.error as e: raise SerfConnectionError(self.host, self.port) from e finally: sock, self._socket = self._socket, None if sock is not None: await sock.aclose() if reader is not None: await reader.cancel() reader = None @property def _counter(self): """ Returns the current value of our message sequence counter and increments it. """ current = self._seq self._seq += 1 return current @staticmethod def _decode_addr_key(obj_dict): """ Callback function to handle the decoding of the 'Addr' field. Serf msgpack 'Addr' as an IPv6 address, and the data needs to be unpacked using socket.inet_ntop(). :param obj_dict: A dictionary containing the msgpack map. :return: A dictionary with the correct 'Addr' format. 
""" key = b"Addr" ip_addr = obj_dict.get(key, None) if ip_addr is not None: if len(ip_addr) == 4: # IPv4 ip_addr = socket.inet_ntop(socket.AF_INET, obj_dict[key]) else: ip_addr = socket.inet_ntop(socket.AF_INET6, obj_dict[key]) # Check if the address is an IPv4 mapped IPv6 address: # ie. ::ffff:xxx.xxx.xxx.xxx if ip_addr.startswith("::ffff:"): ip_addr = ip_addr[7:] # The msgpack codec is set to raw, # thus everything needs to be bytes obj_dict[key] = ip_addr.encode("utf-8") return obj_dict
def main_script(page, rev=None, params=NotImplemented):
    """Main thread."""
    # http://opensourcehacker.com/2011/02/23/temporarily-capturing-python-logging-output-to-a-string-buffer/

    # safety; default mode is safe (no writing)
    pywikibot.config.simulate = True

    pywikibot.output(u'--- ' * 20)

    buffer = StringIO()
    rootLogger = logging.getLogger()

    logHandler = logging.StreamHandler(buffer)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logHandler.setFormatter(formatter)
    rootLogger.addHandler(logHandler)

    sys.stdout = buffer
    sys.stderr = buffer

    # all output to logging and stdout/stderr is caught BUT NOT lua output (!)
    if rev is None:
        code = page.get()               # shell; "on demand"
    else:
        code = page.getOldVersion(rev)  # crontab; scheduled
    try:
        exec(code)
    except Exception:
        # (done according to subster in trunk and submit in
        # rewrite/.../data/api.py)

        # secure traceback print (from api.py submit)
        pywikibot.exception(tb=True)

    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__

    # Remove our handler
    rootLogger.removeHandler(logHandler)

    logHandler.flush()
    buffer.flush()

    pywikibot.output(u'--- ' * 20)

    # safety; restore settings
    pywikibot.config.simulate = __simulate
    sys.argv = __sys_argv

    if resource:
        pywikibot.output(
            u'environment: garbage; %s / memory; %s / members; %s' % (
                gc.collect(),
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss *
                resource.getpagesize(),
                len(dir())))
    else:
        pywikibot.output(
            u'environment: garbage; %s / members; %s' % (
                gc.collect(), len(dir())))
    # 'len(dir())' is equivalent to 'len(inspect.getmembers(__main__))'

    # append result to output page
    if rev is None:
        wiki_logger(buffer.getvalue(), page, rev)

import platform
from collections import OrderedDict
import subprocess
import shlex
import os
import resource
import logging

logging.basicConfig(filename='mygetopt.log', level=logging.INFO)

pagesize = resource.getpagesize()  # Size of the page needed to convert to MB


def python_info():
    '''
    :return: Python version string (without whitespace)
    '''
    return platform.python_version()


def system_info():
    '''
    :return: Dictionary with the corresponding system information
    '''
    os = platform.system()
    if os == 'Darwin':
        return OrderedDict([("LABEL", "CONTENT"),
                            ("Architecture", platform.architecture()[0]),

def main():
    # Initial Variables
    argv = sys.argv
    pid = argv[1]

    # Getting the exact paths to /proc/[pid] with os.path.join, 'just in case'...
    proc_pid_path = os.path.join('/proc', pid)
    maps_path = os.path.join(proc_pid_path, 'maps')
    pagemap_path = os.path.join(proc_pid_path, 'pagemap')
    valid_ranges = []
    maps_line_list = []

    # Get all the lines in map
    try:
        maps_line_list = [line.rstrip('\n') for line in open(maps_path)]
    except FileNotFoundError:
        print("Process is not listed in /proc")
        return

    # Getting virtual address range from /proc/[pid]/maps
    page_size = resource.getpagesize()
    for i, line in enumerate(maps_line_list):
        maps_line_list[i] = line.split()
        mem_range = maps_line_list[i][0]
        start, end = mem_range.split(sep='-')
        # We get the exact start of a page by dividing its Vaddr by the OS's page size
        start = int(start, 16) // page_size
        end = int(end, 16) // page_size
        valid_ranges.append((start, end))

    # For each range found, for each map in this range, find its page table entry
    present = 0
    not_present = 0
    pages_in_ram = []
    pages_not_in_ram = []
    page_table_entry_size = 8
    for virtual_range in valid_ranges:
        start = virtual_range[0]
        end = virtual_range[1]
        current = start
        while current != end:
            # In some specific Linux kernels, there is a page dedicated to [vsyscall].
            # This page has a quite irregular virtual address that either:
            # 1: Leads to some garbage page table entries
            # 2: Leads to nowhere, effectively breaking the code
            # Therefore, it was chosen to ignore this page ("0xffffffffff600") as it's very problematic
            if current == 0xffffffffff600:
                current += 1
                not_present += 1
                continue
            # read_entry takes the offset of the Vaddr
            # We already divided Vaddr by the page size
            # Now we just multiply by the page table entry size (should always be 8 bytes)
            entry = read_entry(pagemap_path, current * page_table_entry_size)
            pfn = get_pfn(entry)
            if is_present(entry):
                pages_in_ram.append((current, pfn, entry))
                present += 1
            else:
                pages_not_in_ram.append((current, pfn, entry))
                not_present += 1
            current += 1

    print("PID: {}".format(pid))
    print("Total number of pages found: {}".format(present + not_present))
    print("Pages that are in RAM: {}".format(present))
    print("Pages that are not in RAM: {}".format(not_present))
    print("Estimated RAM usage: {} KB".format(present * 4))
    print("Estimated TOTAL usage: {} KB".format((present + not_present + 1) * 4))
    print("=================== Pages that are not in RAM [{}]==================="
          .format(not_present))
    for i, page in enumerate(pages_not_in_ram):
        print_page(page, i)
    print("=================== Pages in RAM [{}]===================".format(present))
    for i, page in enumerate(pages_in_ram):
        print_page(page, i)

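# The helpers read_entry(), get_pfn() and is_present() are used above but not
# shown in this snippet. A possible sketch based on the documented
# /proc/[pid]/pagemap format (64-bit little-endian entries; bit 63 = page
# present, bits 0-54 = PFN; reading PFNs may require privileges):
import struct

def read_entry(pagemap_path, offset):
    with open(pagemap_path, "rb") as f:
        f.seek(offset)
        return struct.unpack("<Q", f.read(8))[0]

def get_pfn(entry):
    return entry & ((1 << 55) - 1)

def is_present(entry):
    return bool(entry & (1 << 63))
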
PCI_BAR_IO = 0x01
PCI_BAR_IO_MASK = ~0x03
PCI_BAR_MEM = 0x00
PCI_BAR_MEM_MASK = ~0x0f
PCI_STATUS_CAP_MASK = 0x10
PCI_STATUS_OFFSET = 0x6
PCI_CAP_OFFSET = 0x34
MSIX_BIR_MASK = 0x7
MSIX_SIZE_MASK = 0x7ff

# Global variable to store information from lspci
lspci_info = None
lspci_info_lock = threading.RLock()

# Calculate PAGE_SHIFT: number of bits to shift an address to get the page number
PAGE_SIZE = resource.getpagesize()
PAGE_SHIFT = 0
t = PAGE_SIZE
while not (t & 1):
    t >>= 1
    PAGE_SHIFT += 1

PAGE_MASK = ~(PAGE_SIZE - 1)

# Definitions from Linux: include/linux/pci.h
def PCI_DEVFN(slot, func):
    return ((((slot) & 0x1f) << 3) | ((func) & 0x07))

def PCI_SLOT(devfn):

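# Loop-free equivalent of the PAGE_SHIFT computation above (a sketch, assuming
# PAGE_SIZE is a power of two, which getpagesize() returns in practice):
#
#   PAGE_SHIFT = PAGE_SIZE.bit_length() - 1
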
import resource

print("usage stats", "=>", resource.getrusage(resource.RUSAGE_SELF))
print("max cpu", "=>", resource.getrlimit(resource.RLIMIT_CPU))
print("max data", "=>", resource.getrlimit(resource.RLIMIT_DATA))
print("max processes", "=>", resource.getrlimit(resource.RLIMIT_NPROC))
print("page size", "=>", resource.getpagesize())

## usage stats => (0.03, 0.02, 0, 0, 0, 0, 75, 168, 0, 0, 0, 0, 0, 0, 0, 0)
## max cpu => (2147483647, 2147483647)
## max data => (2147483647, 2147483647)
## max processes => (256, 256)
## page size => 4096

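# Note (an addition): the usage-stats tuple above maps positionally to the
# ru_* fields (ru_utime, ru_stime, ru_maxrss, ...); the same result also
# supports attribute access, e.g.
# resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
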
def linux_current():
    with open("/proc/self/statm", "r") as f:
        return int(f.readline().split()[0]) * resource.getpagesize()

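# Companion sketch (not from the original source): statm field 1 is the total
# program size (VmSize); field 2 is the resident set, if RSS is wanted instead:
def linux_current_rss():
    with open("/proc/self/statm", "r") as f:
        return int(f.readline().split()[1]) * resource.getpagesize()
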
type=str, default="test.mpt", help="output test file") parser.add_argument("-s", "--state", type=str, default="state.mps", help="output state file") parser.add_argument("-d", "--data", type=str, default="data", help="data path") parser.add_argument("-i", "--index", type=str, default="1", help="select invocation") parser.add_argument("--code", action="store_true", help="use code information") parser.add_argument("--pagesize", type=int, default=resource.getpagesize()) args = parser.parse_args() modules = [] maps = open(path.join(args.data, "maps.{}".format(args.index)), "r") for line in maps: words = line.split() if words[1] != "r-xp": continue if len(words) < 6: continue module = words[5] if module[0] == "[": continue if "/libcxtrace" in module: continue if "/libc." in module: continue if "/libc-" in module: continue if "/ld-" in module: continue
def rssUpdate(self, frames):
    """Updates the rss and maxrss for all running frames"""
    if platform.system() == 'Windows' and winpsIsAvailable:
        values = list(frames.values())
        pids = [frame.pid for frame in list(
            filter(lambda frame: frame.pid > 0, values)
        )]
        # pylint: disable=no-member
        stats = winps.update(pids)
        # pylint: enable=no-member
        for frame in values:
            self.__updateGpuAndLlu(frame)
            if frame.pid > 0 and frame.pid in stats:
                stat = stats[frame.pid]
                frame.rss = stat["rss"] // 1024
                frame.maxRss = max(frame.rss, frame.maxRss)
                frame.runFrame.attributes["pcpu"] = str(
                    stat["pcpu"] * self.__coreInfo.total_cores
                )
        return

    if platform.system() != 'Linux':
        return

    pids = {}
    for pid in os.listdir("/proc"):
        if pid.isdigit():
            try:
                with open("/proc/%s/stat" % pid, "r") as statFile:
                    statFields = [None, None] + statFile.read().rsplit(")", 1)[-1].split()
                # See "man proc"
                pids[pid] = {
                    "session": statFields[5],
                    "vsize": statFields[22],
                    "rss": statFields[23],
                    # These are needed to compute the cpu used
                    "utime": statFields[13],
                    "stime": statFields[14],
                    "cutime": statFields[15],
                    "cstime": statFields[16],
                    # The time in jiffies the process started
                    # after system boot.
                    "start_time": statFields[21],
                }
            # pylint: disable=broad-except
            except Exception:
                log.exception('failed to read stat file for pid %s', pid)

    # pylint: disable=too-many-nested-blocks
    try:
        now = int(time.time())
        pidData = {"time": now}
        bootTime = self.getBootTime()

        values = list(frames.values())

        for frame in values:
            if frame.pid > 0:
                session = str(frame.pid)
                rss = 0
                vsize = 0
                pcpu = 0
                for pid, data in pids.items():
                    if data["session"] == session:
                        try:
                            rss += int(data["rss"])
                            vsize += int(data["vsize"])

                            # jiffies used by this process, last two means that dead
                            # children are counted
                            totalTime = int(data["utime"]) + \
                                int(data["stime"]) + \
                                int(data["cutime"]) + \
                                int(data["cstime"])

                            # Seconds of process life, boot time is already in seconds
                            seconds = now - bootTime - \
                                float(data["start_time"]) / rqd.rqconstants.SYS_HERTZ
                            if seconds:
                                if pid in self.__pidHistory:
                                    # Percent cpu using decaying average, 50% from 10 seconds
                                    # ago, 50% from last 10 seconds:
                                    oldTotalTime, oldSeconds, oldPidPcpu = \
                                        self.__pidHistory[pid]
                                    # checking if already updated data
                                    if seconds != oldSeconds:
                                        pidPcpu = ((totalTime - oldTotalTime) /
                                                   float(seconds - oldSeconds))
                                        pcpu += (oldPidPcpu + pidPcpu) / 2  # %cpu
                                        pidData[pid] = totalTime, seconds, pidPcpu
                                else:
                                    pidPcpu = totalTime / seconds
                                    pcpu += pidPcpu
                                    pidData[pid] = totalTime, seconds, pidPcpu
                        # pylint: disable=broad-except
                        except Exception as e:
                            log.warning(
                                'Failure with pid rss update due to: %s at %s',
                                e, traceback.extract_tb(sys.exc_info()[2]))

                rss = (rss * resource.getpagesize()) // 1024
                vsize = int(vsize / 1024)

                frame.rss = rss
                frame.maxRss = max(rss, frame.maxRss)
                frame.vsize = vsize
                frame.maxVsize = max(vsize, frame.maxVsize)

                frame.runFrame.attributes["pcpu"] = str(pcpu)
                self.__updateGpuAndLlu(frame)

        # Store the current data for the next check
        self.__pidHistory = pidData
    # pylint: disable=broad-except
    except Exception as e:
        log.exception('Failure with rss update due to: %s', e)

def format_memory_usage(point="memory usage"):
    usage = resource.getrusage(resource.RUSAGE_SELF)
    txt = "%s: usertime=%s systime=%s mem=" + bcolors.FAIL + "%s" + bcolors.ENDC + " mb"
    return txt % (point, usage[0], usage[1],
                  (usage[2] * resource.getpagesize()) / 1000000.0)

def get_page_size():
    return resource.getpagesize()

def nl_recv(sk, nla, buf, creds=None):
    """Receive data from Netlink socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L625

    Receives data from a connected netlink socket using recvmsg() and returns
    the number of bytes read. The read data is stored in a newly allocated
    buffer that is assigned to `buf`. The peer's netlink address will be
    stored in `nla`.

    This function blocks until data is available to be read unless the socket
    has been put into non-blocking mode using nl_socket_set_nonblocking() in
    which case this function will return immediately with a return value of 0.

    The buffer size used when reading from the netlink socket and thus
    limiting the maximum size of a netlink message that can be read defaults
    to the size of a memory page (getpagesize()). The buffer size can be
    modified on a per socket level using the function
    `nl_socket_set_msg_buf_size()`.

    If message peeking is enabled using nl_socket_enable_msg_peek() the size
    of the message to be read will be determined using the MSG_PEEK flag prior
    to performing the actual read. This leads to an additional recvmsg() call
    for every read operation which has performance implications and is not
    recommended for high throughput protocols.

    An eventual interruption of the recvmsg() system call is automatically
    handled by retrying the operation.

    If receiving of credentials has been enabled using the function
    `nl_socket_set_passcred()`, this function will allocate a new struct
    `ucred` filled with the received credentials and assign it to `creds`.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance) (input).
    nla -- Netlink socket structure to hold address of peer (sockaddr_nl class instance) (output).
    buf -- destination bytearray() for message content (output).
    creds -- destination class instance for credentials (ucred class instance) (output).

    Returns:
    Two-item tuple. First item is number of bytes read, 0 on EOF, 0 on no data
    event (non-blocking mode), or a negative error code. Second item is the
    message content from the socket or None.
    """
    flags = 0
    page_size = resource.getpagesize() * 4
    if sk.s_flags & NL_MSG_PEEK:
        flags |= socket.MSG_PEEK | socket.MSG_TRUNC
    iov_len = sk.s_bufsize or page_size

    if creds and sk.s_flags & NL_SOCK_PASSCRED:
        raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2

    while True:  # This is the `goto retry` implementation.
        try:
            if hasattr(sk.socket_instance, 'recvmsg'):
                iov, _, msg_flags, address = sk.socket_instance.recvmsg(iov_len, 0, flags)
            else:
                iov, address = sk.socket_instance.recvfrom(iov_len, flags)
                msg_flags = 0
        except OSError as exc:
            if exc.errno == errno.EINTR:
                continue  # recvmsg() returned EINTR, retrying.
            return -nl_syserr2nlerr(exc.errno)
        nla.nl_family = sk.socket_instance.family  # recvmsg() in C does this, but not Python's.
        if not iov:
            return 0

        if msg_flags & socket.MSG_CTRUNC:
            raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2

        if iov_len < len(iov) or msg_flags & socket.MSG_TRUNC:
            # Provided buffer is not long enough.
            # Enlarge it to size of n (which should be total length of the message) and try again.
            iov_len = len(iov)
            continue

        if flags:
            # Buffer is big enough, do the actual reading.
            flags = 0
            continue

        nla.nl_pid = address[0]
        nla.nl_groups = address[1]

        if creds and sk.s_flags & NL_SOCK_PASSCRED:
            raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2

        if iov:
            buf += iov

        return len(buf)

def mpi_child(fn):
    """
    An MPI child wrapper that will call the supplied function in a child
    context - reading its arguments from mpicomm.recv(source=0).
    """
    rank = MPI.COMM_WORLD.Get_rank()
    logger.debug("Child {} (remote) starting".format(rank))
    while True:
        # A little sleep to let everything start...
        time.sleep(3)

        # Send ready
        logger.debug("Child {} (remote) sending hello".format(rank))
        try:
            MPI.COMM_WORLD.send(True, dest=0)
        except Exception:
            # Sometimes we see messages like this:
            # [bb2a3c26][[4455,1],95][btl_tcp_endpoint.c:818:mca_btl_tcp_endpoint_complete_connect] connect() to 169.254.95.120 failed: Connection refused (111)
            # That seems to kill the process... and we're lost.
            logger.warning("Error saying hello", exc_info=True)
            time.sleep(5)
            continue
        else:
            logger.debug("Child {} (remote) sent hello".format(rank))

        start = time.time()
        retry = False
        # child - wait to be given a data structure
        while not MPI.COMM_WORLD.Iprobe(source=0):
            if time.time() - start > CHILD_RETRY_HELLO:
                retry = True
                break
            time.sleep(1)
        if retry:
            logger.debug("Child {} (remote) heard nothing from parent - "
                         "will send another hello".format(rank))
            continue

        try:
            args = MPI.COMM_WORLD.recv(source=0)
        except EOFError:
            logger.exception("Child {} error receiving instructions - carrying on".format(rank))
            continue

        if args is None:
            logger.info("Child {} (remote) exiting - no args received".format(rank))
            break

        logger.debug("Child {} (remote) received data".format(rank))
        ret = fn(*args)

        mem_raw = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        mem_size = resource.getpagesize()
        mem_bytes = mem_raw * mem_size
        meminfo = "{:.2f} MB".format(mem_bytes / 1024**2)

        if ret is None:
            # Nothing was generated
            MPI.COMM_WORLD.send((None, meminfo), dest=0)
            logger.info("Child {} (remote) aborted job".format(rank))
        else:
            logger.debug("Child {} (remote) sending results back".format(rank))
            MPI.COMM_WORLD.send((ret, meminfo), dest=0)
            logger.debug("Child {} (remote) completed job".format(rank))

#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig()
logging.debug('Starting')

gmond_prefix = 'lusclt_'
gmond_group = 'lusclt'
tmax = 3600
descriptors = []
stats = dict()
last_val = dict()
cur_time = 0.0
last_update = 0.0
PAGE_SIZE = getpagesize()
MAX_UPDATE_TIME = 5

#LLITE_DIR = '/tmp/llite'
LLITE_DIR = '/proc/fs/lustre/llite'
#IGNORE_FS

##############################################################################
def get_lus_version():
    '''return lustre version number'''
    fsvers = '/proc/fs/lustre/version'
    logging.debug(' opening file ' + fsvers)
    try:
        fobj = open(fsvers)
    except IOError:
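# A hedged sketch of how descriptors like the ones above are typically
# registered with gmond's python module interface; the metric name, callback,
# and units here are illustrative, not taken from the original module.
def read_counter(name):
    '''call_back: return the latest sampled value for the named metric'''
    return stats.get(name, 0)

def metric_init(params):
    descriptors.append({
        'name': gmond_prefix + 'read_bytes',
        'call_back': read_counter,
        'time_max': tmax,
        'value_type': 'float',
        'units': 'bytes/sec',
        'slope': 'both',
        'format': '%f',
        'description': 'lustre client read throughput',
        'groups': gmond_group,
    })
    return descriptors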
# PRE Workbench # Copyright (C) 2019 Max Weller # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from pathlib import Path from resource import getpagesize PAGESIZE = getpagesize() PATH = Path('/proc/self/statm') def get_resident_set_size() -> int: """Return the current resident set size in bytes.""" # statm columns are: size resident shared text lib data dt statm = PATH.read_text() fields = statm.split() return int(fields[1]) * PAGESIZE
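# A quick sanity check of get_resident_set_size(), assuming a linux-style
# /proc and that a large zero-filled allocation is actually touched (and
# therefore resident); the sizes are illustrative.
before = get_resident_set_size()
blob = bytearray(50 * 1024 * 1024)  # ~50 MiB of zero-filled, touched pages
after = get_resident_set_size()
print('RSS grew by %.1f MiB' % ((after - before) / 2 ** 20))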
# encoding: utf-8
"""
usage.py

Created by Thomas Mangin on 2009-09-06.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""

import sys
import resource

if sys.platform == 'darwin':
    # darwin reports ru_maxrss in bytes
    divisor = 1024.0*1024.0
else:
    # linux and the BSDs report ru_maxrss in kilobytes, not pages
    # (page-denominated values are an old SysV convention)
    divisor = 1024.0

def usage(label='usage'):
    rusage = resource.getrusage(resource.RUSAGE_SELF)
    return '%s: usertime=%s systime=%s mem=%s mb' % (label, rusage.ru_utime, rusage.ru_stime, rusage.ru_maxrss/divisor)
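# A call site, for reference; the label and the printed figures are
# illustrative.
print(usage('post-load'))
# post-load: usertime=0.12 systime=0.03 mem=23.4 mb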
class SerfConnection(object):
    """
    Manages RPC communication to and from a Serf agent.
    """

    # Read from the RPC socket in blocks of this many bytes.
    # (Typically 4k)
    _socket_recv_size = resource.getpagesize()

    def __init__(self, host='localhost', port=7373, timeout=3):
        self.host = host
        self.port = port
        self.timeout = timeout
        self._socket = None
        self._seq = 0

    def __repr__(self):
        return "%(class)s<counter=%(c)s,host=%(h)s,port=%(p)s,timeout=%(t)s>" \
            % {'class': self.__class__.__name__,
               'c': self._seq,
               'h': self.host,
               'p': self.port,
               't': self.timeout}

    def call(self, command, params=None, expect_body=True, stream=False):
        """
        Sends the provided command to Serf for evaluation, with any
        parameters as the message body.
        """
        if self._socket is None:
            raise SerfConnectionError('handshake must be made first')

        header = msgpack.packb({"Seq": self._counter(), "Command": command})

        if params is not None:
            body = msgpack.packb(params)
            self._socket.sendall(header + body)
        else:
            self._socket.sendall(header)

        unpacker = msgpack.Unpacker(object_hook=self._decode_addr_key)

        def read_from_socket():
            try:
                buf = self._socket.recv(self._socket_recv_size)
                if len(buf) == 0:
                    # Connection was closed.
                    raise SerfConnectionError("Connection closed by peer")
                unpacker.feed(buf)
            except socket.timeout:
                raise SerfTimeout(
                    "timeout while waiting for an RPC response "
                    "(still expecting %d message(s))" % messages_expected)

        if stream:
            def keep_reading_from_stream(init=None):
                sub_response = SerfResult()
                while True:
                    if init is not None:
                        it = init
                        init = None
                    else:
                        if self._socket is None:
                            return
                        read_from_socket()
                        it = unpacker
                    for msg in it:
                        if sub_response.head is None:
                            sub_response.head = msg
                        elif sub_response.body is None:
                            sub_response.body = msg
                            yield sub_response
                            sub_response = SerfResult()

            mem = []
            messages_expected = 1
            while messages_expected > 0:
                read_from_socket()
                # Might have received enough to deserialise one or more
                # messages, try to fill out the response object.
                for message in unpacker:
                    mem.append(message)
                    messages_expected -= 1

            # Disable timeout while we are in streaming mode
            self._socket.settimeout(None)
            response = SerfResult()
            # The first buffered message is the RPC acknowledgement; anything
            # after it already belongs to the stream.
            response.head = mem.pop(0)
            response.body = keep_reading_from_stream(mem)
        else:
            # The number of msgpack messages that are expected
            # in response to this command.
            messages_expected = 2 if expect_body else 1

            response = SerfResult()

            # Continue reading from the network until the expected number of
            # msgpack messages have been received.
            while messages_expected > 0:
                read_from_socket()

                # Might have received enough to deserialise one or more
                # messages, try to fill out the response object.
                for message in unpacker:
                    if response.head is None:
                        response.head = message
                    elif response.body is None:
                        response.body = message
                    else:
                        raise SerfProtocolError(
                            "protocol handler got more than 2 messages. "
                            "Unexpected message is: %s" % message)

                    # Expecting one fewer message now.
                    messages_expected -= 1

        return response

    def handshake(self):
        """
        Sets up the connection with the Serf agent and does the initial
        handshake.
""" if self._socket is None: self._socket = self._connect() return self.call('handshake', {"Version": 1}, expect_body=False) def auth(self, auth_key): """ Performs the initial authentication on connect """ if self._socket is None: self._socket = self._connect() return self.call('auth', {"AuthKey": auth_key}, expect_body=False) def _connect(self): try: return socket.create_connection((self.host, self.port), self.timeout) except socket.error: e = sys.exc_info()[1] raise SerfConnectionError(self._error_message(e)) def _counter(self): """ Returns the current value of the iterator and increments it. """ current = self._seq self._seq += 1 return current def _error_message(self, exception): return "Error %s connecting %s:%s. %s." % \ (exception.args[0], self.host, self.port, exception.args[1]) def _decode_addr_key(self, obj_dict): """ Callback function to handle the decoding of the 'Addr' field. Serf msgpack 'Addr' as an IPv6 address, and the data needs to be unpack using socket.inet_ntop(). See: https://github.com/KushalP/serfclient-py/issues/20 :param obj_dict: A dictionary containing the msgpack map. :return: A dictionary with the correct 'Addr' format. """ key = b'Addr' if key in obj_dict: try: # Try to convert a packed IPv6 address. # Note: Call raises ValueError if address is actually IPv4. ip_addr = socket.inet_ntop(socket.AF_INET6, obj_dict[key]) # Check if the address is an IPv4 mapped IPv6 address: # ie. ::ffff:xxx.xxx.xxx.xxx if ip_addr.startswith('::ffff:'): ip_addr = ip_addr.lstrip('::ffff:') obj_dict[key] = ip_addr.encode('utf-8') except ValueError: # Try to convert a packed IPv4 address. ip_addr = socket.inet_ntop(socket.AF_INET, obj_dict[key]) obj_dict[key] = ip_addr.encode('utf-8') return obj_dict def close(self): """ Close the connection with the Serf agent. """ if self._socket: self._socket.close() self._socket = None
def elapsed_mem(self):
    return float(getrusage(RUSAGE_SELF)[6] * getpagesize()) - self.start_mem
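# getrusage()[6] is ru_minflt, so the expression above approximates memory
# faulted in since start_mem was sampled. A standalone sketch of the same
# idea (the helper name faulted_bytes() is mine, not from the class above):
from resource import getrusage, getpagesize, RUSAGE_SELF


def faulted_bytes():
    '''Bytes of memory minor-faulted in so far, as ru_minflt * page size.'''
    return float(getrusage(RUSAGE_SELF)[6] * getpagesize())


start = faulted_bytes()
data = bytearray(20 * 1024 * 1024)  # touch ~20 MiB of zero-filled pages
print('faulted in %.1f MiB' % ((faulted_bytes() - start) / 2 ** 20))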
def using(point=""):
    usage = resource.getrusage(resource.RUSAGE_SELF)
    # NOTE: scaling ru_maxrss by the page size assumes page-denominated
    # values; linux reports kilobytes and darwin bytes (see helper below).
    return '%s: usertime=%s systime=%s mem=%s mb' % (
        point, usage[0], usage[1], usage[2] * resource.getpagesize() / 1000000.0)
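# Since the widely copied recipe above assumes page-denominated ru_maxrss,
# here is a hedged normalizing helper (the name maxrss_bytes() is mine):
# linux and the BSDs report kilobytes, darwin reports bytes.
import resource
import sys


def maxrss_bytes():
    '''Peak resident set size in bytes, normalized across platforms.'''
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return rss if sys.platform == 'darwin' else rss * 1024


print('peak rss: %.1f mb' % (maxrss_bytes() / 1e6))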