def _os_getpid():
    """Return this JVM's process id as an int.

    The runtime MX bean's name is conventionally "pid@hostname"; the part
    before the '@' is the pid.  Falls back to 1 when that part is not a
    number (e.g. on JVMs that use a different name format).
    """
    runtime_name = ManagementFactory.getRuntimeMXBean().getName()
    pid_text = runtime_name.partition('@')[0]
    try:
        pid = int(pid_text)
    except ValueError:
        pid = 1
    return pid
def cause_deadlock():
    """Deliberately provoke a deadlock and detect it via the ThreadMXBean.

    Two daemon threads acquire the same pair of locks in opposite order
    (the classic lock-ordering deadlock), while this thread polls the
    JVM's ThreadMXBean until it reports deadlocked threads.
    NOTE(review): relies on acquire_locks(counter, first_lock, second_lock)
    defined elsewhere in this file -- not visible in this chunk.
    """
    counter = [0]  # shared mutable cell so workers can publish progress
    lock_one = Lock()
    lock_two = Lock()
    # Same target, opposite lock order -> guaranteed deadlock candidate.
    threads = [
        Thread(name="thread #1", target=acquire_locks,
               args=(counter, lock_one, lock_two)),
        Thread(name="thread #2 (reversed)", target=acquire_locks,
               args=(counter, lock_two, lock_one))
    ]
    for thread in threads:
        thread.setDaemon(True)  # make shutdown possible after deadlock
        thread.start()
    thread_mxbean = ManagementFactory.getThreadMXBean()
    # Poll once a second; findDeadlockedThreads() returns None until the
    # JVM actually observes a cycle in the lock graph.
    while True:
        time.sleep(1)
        print "monitoring thread", counter[0]
        thread_ids = thread_mxbean.findDeadlockedThreads()
        if thread_ids:
            print "monitoring thread: deadlock detected, shutting down", list(
                thread_ids)
            break
def getStatistic_common(name):
    """Return the current value of the named enabler statistic.

    Supported names:
      enabler_HEAP_MEMORY_USAGE / enabler_NON_HEAP_MEMORY_USAGE -- JVM
          memory usage in MiB, read from the MemoryMXBean.
      enabler_DATANODE_DECOMMISION_REQUESTS -- count of queued datanode
          decommission requests, obtained by running "hadoop fs -count"
          against the decommission queue directory (0 if it doesn't exist).
      enabler_DISK_SPACE_FREE / enabler_DISK_SPACE_USED /
      enabler_DISK_SPACE_USED_PERCENT -- disk usage of the enabler tmp dir.

    Raises Exception for unsupported names, and on Windows for the
    decommission statistic.
    NOTE(review): depends on names defined elsewhere in this file
    (proxy, decommissionqueue_dir, runCommand, getStatistic_disk,
    ContainerUtils) -- not visible in this chunk.
    """
    memoryBean = ManagementFactory.getMemoryMXBean()
    if name == "enabler_HEAP_MEMORY_USAGE":
        # note: 'bytes' shadows the builtin; kept as-is
        bytes = memoryBean.getHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024  # convert to MiB
    elif name == "enabler_NON_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getNonHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024  # convert to MiB
    elif name == "enabler_DATANODE_DECOMMISION_REQUESTS":
        hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
        if ContainerUtils.isWindows():
            raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
        else:
            # "hadoop fs -count <dir>" prints DIR_COUNT FILE_COUNT SIZE PATH;
            # field [1] is presumably the file (request) count -- TODO confirm.
            commandline = "sh " + os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -count " + decommissionqueue_dir
            output = runCommand(commandline, expectedReturnCodes=[0, 255], suppressOutput=True)
            if (output[0] == 0):
                stdout = str(output[1])
                count = int(stdout.split()[1])
                return int(count)
            elif (output[0] == 255):
                # Decommission request directory doesn't exist. Not expected to
                # exist until some datanode posts the first request.
                return int(0)
            else:
                ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + "] while attempting to retrieve statistic enabler_DATANODE_DECOMMISION_REQUESTS statistic. Assuming 0.")
                print output
                return int(0)
    elif name.startswith('enabler_DISK_'):
        tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()
        if name == "enabler_DISK_SPACE_FREE":
            blocks = int(getStatistic_disk(tmpdir)[0])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
            #return available
        elif name == "enabler_DISK_SPACE_USED":
            blocks = int(getStatistic_disk(tmpdir)[1])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
            #return used
        elif name == "enabler_DISK_SPACE_USED_PERCENT":
            return getStatistic_disk(tmpdir)[2]
            #return int(percent[:-1])
        else:
            raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
    else:
        raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
def __init__(self, chartFun, isTemporal = False):
    """Build the simulation-settings Swing panel.

    chartFun   -- callable stored on the instance (presumably redraws the
                  chart when settings change; verify against callers).
    isTemporal -- True labels the "neutral" checkbox for Ne; False labels
                  it for mean Fst and adds a "Force mean Fst" checkbox.
    """
    self.isTemporal = isTemporal
    # NOTE(review): this constructs a JPanel and discards it; it looks like
    # JPanel.__init__(self) was intended -- confirm.
    JPanel()
    #self.setBackground(Color.LIGHT_GRAY)
    self.chartFun = chartFun
    self.enableChartFun = False
    self.setLayout(GridLayout(6,2))  # 6 rows of label/control pairs
    self.add(JLabel('CPU Cores'))
    # Offer the detected core count merged with the usual powers of two,
    # defaulting the selection to the detected count.
    nprocs = ManagementFactory.getOperatingSystemMXBean().getAvailableProcessors()
    procvals = sorted(set([nprocs, 1, 2, 4, 8, 16, 32, 64, 128]))
    cores = JComboBox(procvals)
    cores.setSelectedIndex(procvals.index(nprocs))
    cores.setMaximumSize(cores.getPreferredSize())
    self.cores = cores
    self.add(self.cores)
    self.add(JLabel('# of sims (x1000) '))
    # Choices: 50..95 in steps of 5, then 100..1000 in steps of 100.
    numSims = JComboBox(
        map(lambda x: str((10+x)*5), range(10)) +
        map(lambda x: str(x*100), range(1,11)))
    numSims.setMaximumSize(numSims.getPreferredSize())
    self.numSims = numSims
    self.add(self.numSims)
    if isTemporal:
        self.add(JLabel('"Neutral" Ne'))
        self.neutral = JCheckBox()
        self.neutral.addActionListener(self)
        self.add(self.neutral)
    else:
        self.add(JLabel('"Neutral" mean Fst'))
        self.neutral = JCheckBox()
        self.neutral.addActionListener(self)
        self.add(self.neutral)
        self.add(JLabel('Force mean Fst'))
        self.force = JCheckBox()
        self.force.addActionListener(self)
        self.add(self.force)
    self.add(JLabel('Confidence interval '))
    ci = JComboBox(['0.95', '0.99', '0.995'])
    ci.addItemListener(self)
    ci.setMaximumSize(cores.getPreferredSize())
    self.ci = ci
    self.add(self.ci)
    self.add(JLabel('False Disc. Rate'))
    # Locale.US keeps the decimal separator a '.' regardless of host locale.
    fdr = JFormattedTextField(
        NumberFormat.getNumberInstance(Locale.US))
    fdr.setValue(0.1)
    fdr.addPropertyChangeListener(self)
    self.add(fdr)
    self.fdr = fdr
def __init__(self, chartFun, isTemporal = False):
    """Build the simulation-settings Swing panel.

    chartFun   -- callable stored on the instance (presumably redraws the
                  chart when settings change; verify against callers).
    isTemporal -- True labels the "neutral" checkbox for Ne; False labels
                  it for mean Fst and adds a "Force mean Fst" checkbox.
    """
    self.isTemporal = isTemporal
    # NOTE(review): this constructs a JPanel and discards it; it looks like
    # JPanel.__init__(self) was intended -- confirm.
    JPanel()
    #self.setBackground(Color.LIGHT_GRAY)
    self.chartFun = chartFun
    self.enableChartFun = False
    self.setLayout(GridLayout(6,2))  # 6 rows of label/control pairs
    self.add(JLabel('CPU Cores'))
    cores = JComboBox(['1', '2', '4', '8', '16', '32', '64', '128'])
    nprocs = ManagementFactory.getOperatingSystemMXBean().getAvailableProcessors()
    # Select the power-of-two entry for the detected core count, capped at
    # index 7 ('128').
    # NOTE(review): ceil() wraps nprocs, an int, so it is a no-op here;
    # ceil(log(nprocs)/log(2)) may have been intended -- confirm.
    pos = min([7, log(ceil(nprocs)) / log(2)])
    cores.setSelectedIndex(int(pos))
    cores.setMaximumSize(cores.getPreferredSize())
    self.cores = cores
    self.add(self.cores)
    self.add(JLabel('# of sims (x1000) '))
    # Choices: 50..95 in steps of 5, then 100..1000 in steps of 100.
    numSims = JComboBox(
        map(lambda x: str((10+x)*5), range(10)) +
        map(lambda x: str(x*100), range(1,11)))
    numSims.setMaximumSize(numSims.getPreferredSize())
    self.numSims = numSims
    self.add(self.numSims)
    if isTemporal:
        self.add(JLabel('"Neutral" Ne'))
        self.neutral = JCheckBox()
        self.neutral.addActionListener(self)
        self.add(self.neutral)
    else:
        self.add(JLabel('"Neutral" mean Fst'))
        self.neutral = JCheckBox()
        self.neutral.addActionListener(self)
        self.add(self.neutral)
        self.add(JLabel('Force mean Fst'))
        self.force = JCheckBox()
        self.force.addActionListener(self)
        self.add(self.force)
    self.add(JLabel('Confidence interval '))
    ci = JComboBox(['0.95', '0.99', '0.995'])
    ci.addItemListener(self)
    ci.setMaximumSize(cores.getPreferredSize())
    self.ci = ci
    self.add(self.ci)
    self.add(JLabel('False Disc. Rate'))
    # Locale.US keeps the decimal separator a '.' regardless of host locale.
    fdr = JFormattedTextField(
        NumberFormat.getNumberInstance(Locale.US))
    fdr.setValue(0.1)
    fdr.addPropertyChangeListener(self)
    self.add(fdr)
    self.fdr = fdr
simDt = 0.005 #simDt= 0.01 # for newly added mep tests which will be used for omv tests with jNeuroML_NEURON neuroConstructSeed = 12345 simulatorSeed = 11111 #simulators = ["NEURON", "GENESIS_PHYS", "MOOSE_PHYS", "MOOSE_SI"] #"GENESIS_SI", #simulators = ["NEURON", "GENESIS_PHYS"] #"GENESIS_SI", #simulators = ["GENESIS", "MOOSE"] simulators = ["NEURON"] #maxElecLens = [0.01] # 0.01 will give ~700 in FRB & RS #maxElecLens = [-1] # -1 means don't recompartmentalise use settings in proj #maxElecLens = [0.025, 0.01,0.005, 0.0025, 0.001,0.0005, 0.00025, 0.0001] #### Note: the function testAll() below does not recompartmentalise the cells numConcurrentSims = ManagementFactory.getOperatingSystemMXBean( ).getAvailableProcessors() - 1 if mpiConfig != MpiSettings.LOCAL_SERIAL: numConcurrentSims = 60 suggestedRemoteRunTime = 80 # mins varTimestepNeuron = False # could be more accurate with var time step in nrn, but need to compare these to jNeuroML_NEURON varTimestepTolerance = 0.00001 analyseSims = True plotSims = True plotVoltageOnly = True simAllPrefix = "" # Adds a prefix to simulation reference runInBackground = True #(mpiConf == MpiSettings.LOCAL_SERIAL)
import os
import sys
import shutil
from org.apache.log4j import Logger
from java.lang.management import ManagementFactory
import unittest

#sys.modules['AdminConfig'] = AdminConfig
#sys.modules['AdminControl'] = AdminControl
#sys.modules['AdminApp'] = AdminApp
#sys.modules['AdminTask'] = AdminTask
#sys.modules['Help'] = Help

'''set up the paths, create the tmp working directory and make a props dictionary'''
# The JVM runtime name is conventionally "pid@hostname"; the part before
# the '@' is used as this process's pid (keeps tmp dirs per-process).
pidname=ManagementFactory.getRuntimeMXBean().getName()
pidlist=pidname.split('@')
pid=pidlist[0]
# Project layout is rooted at the current working directory.
currentWorkingDirectory=os.getcwd()
#print currentWorkingDirectory
binPath=currentWorkingDirectory + os.sep + "bin"
confPath=currentWorkingDirectory + os.sep + "conf"
logPath=currentWorkingDirectory + os.sep + "log"
tmpPath=currentWorkingDirectory + os.sep + "tmp" + os.sep + pid  # per-process tmp dir
libPath=currentWorkingDirectory + os.sep + "lib"
jythonpath=libPath + os.sep + "jython" + os.sep + "Lib"
srcPath=currentWorkingDirectory + os.sep + "src"
testPath=currentWorkingDirectory + os.sep + "test"
# Make project directories importable.
sys.path.append(binPath)
sys.path.append(confPath)
sys.path.append(logPath)
sys.path.append(tmpPath)
#simDt= 0.01 # for newly added mep tests which will be used for omv tests with jNeuroML_NEURON neuroConstructSeed = 12345 simulatorSeed = 11111 #simulators = ["NEURON", "GENESIS_PHYS", "MOOSE_PHYS", "MOOSE_SI"] #"GENESIS_SI", #simulators = ["NEURON", "GENESIS_PHYS"] #"GENESIS_SI", #simulators = ["GENESIS", "MOOSE"] simulators = ["NEURON"] #maxElecLens = [0.01] # 0.01 will give ~700 in FRB & RS #maxElecLens = [-1] # -1 means don't recompartmentalise use settings in proj #maxElecLens = [0.025, 0.01,0.005, 0.0025, 0.001,0.0005, 0.00025, 0.0001] #### Note: the function testAll() below does not recompartmentalise the cells numConcurrentSims = ManagementFactory.getOperatingSystemMXBean().getAvailableProcessors() -1 if mpiConfig != MpiSettings.LOCAL_SERIAL: numConcurrentSims = 60 suggestedRemoteRunTime = 80 # mins varTimestepNeuron = False # could be more accurate with var time step in nrn, but need to compare these to jNeuroML_NEURON varTimestepTolerance = 0.00001 analyseSims = True plotSims = True plotVoltageOnly = True simAllPrefix = "" # Adds a prefix to simulation reference
import openhab
from java.lang.management import ManagementFactory
from com.sun.management import HotSpotDiagnosticMXBean

# Platform bean used to trigger heap dumps on demand.
hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean)


@openhab.rule
class HeapDumpRule(object):
    """Dump the Java heap when thread count exceeds a specified threshold"""

    def __init__(self, maxThreads, outpath, notify=False):
        # Threshold, dump destination, and whether to push a notification.
        self._maxThreads = maxThreads
        self._outpath = outpath
        self._notify = notify
        # One-shot latch: only ever dump the heap once per rule instance.
        self._dumped = False

    def getEventTrigger(self):
        # Re-evaluate whenever the monitored thread-count item changes.
        return [ChangedEventTrigger("JmxThreadCount")]

    def execute(self, event):
        # Guard clauses: already dumped, or still under the threshold.
        if self._dumped:
            return
        if not (event.item.state.intValue() > self._maxThreads):
            return
        self._dumped = True
        message = "EXCESSIVE THREADS: count={}, dumping heap".format(event.item.state)
        self.log.error(message)
        if self._notify:
            self.sendNotification(message)
        # Second arg True = dump only live objects.
        hotSpotDiagnosticMXBean.dumpHeap(self._outpath, True)


def getRules():
    heap_rule = HeapDumpRule(400, "/opt/openhab-1.7.1/memory_dump.hprof", True)
    return RuleSet([heap_rule])
# Terminate the requested PostgreSQL backend, then attach JVM/host
# statistics as attributes of the XML "source" element.
backend_pid = inv.getLong("backend_pid")
con = Hiber.session().connection()
stmt = con.createStatement()
# backend_pid is a long parsed from the request, so this concatenation
# cannot inject arbitrary SQL.
stmt.execute("select pg_terminate_backend(" + str(backend_pid) + ")")
stmt.close()
Hiber.commit()
source.appendChild(MonadMessage("Cancelled backend.").toXml(doc))
# Thousands-separated integer formatting for all numeric attributes.
df = DecimalFormat("###,###,###,###,##0")
runtime = Runtime.getRuntime()
source.setAttribute("free-memory", df.format(runtime.freeMemory()))
source.setAttribute("max-memory", df.format(runtime.maxMemory()))
source.setAttribute("total-memory", df.format(runtime.totalMemory()))
source.setAttribute("available-processors", str(runtime.availableProcessors()))
source.setAttribute("system-load-average", str(ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage()))
# JavaSysMon supplies OS-level metrics the JDK beans don't expose.
mon = JavaSysMon()
source.setAttribute("cpu-frequency-in-hz", df.format(mon.cpuFrequencyInHz()))
source.setAttribute("current-pid", df.format(mon.currentPid()))
source.setAttribute("num-cpus", df.format(mon.numCpus()))
source.setAttribute("os-name", mon.osName())
source.setAttribute("uptime-in-seconds", df.format(mon.uptimeInSeconds()))
cpu = mon.cpuTimes()
source.setAttribute("idle-millis", df.format(cpu.getIdleMillis()))
source.setAttribute("system-millis", df.format(cpu.getSystemMillis()))
source.setAttribute("total-millis", df.format(cpu.getTotalMillis()))
source.setAttribute("user-millis", df.format(cpu.getUserMillis()))
props = doc.createElement("properties")
source.appendChild(props)
def getThreadInfo(thread):
    """Look up a thread's info object via the JVM's ThreadMXBean.

    thread -- an object exposing an 'id' attribute (the Java thread id).
    """
    from java.lang.management import ManagementFactory
    thread_bean = ManagementFactory.getThreadMXBean()
    return thread_bean.getThreadInfo(thread.id)
def _os_getpid():
    """Return the JVM runtime name (conventionally "pid@hostname").

    Note this is the raw name string, not a numeric pid; callers here
    only need a unique per-process identifier.
    """
    runtime_bean = ManagementFactory.getRuntimeMXBean()
    return runtime_bean.getName()
import openhab from java.lang.management import ManagementFactory jmx_beans = { 'Threads' : ManagementFactory.getThreadMXBean(), 'Memory' : ManagementFactory.getMemoryMXBean(), } for bean in ManagementFactory.getMemoryPoolMXBeans(): jmx_beans[bean.name] = bean @openhab.rule class JmxBinding(object): # Binding all items in one rule to avoid numerous timers. If some items # should be updated at a different frequencies, then multiple rules can be # instantiated. def __init__(self, item_specs, cronspec="0 * * * * ?"): self._item_specs = item_specs self._cronspec = cronspec def getEventTrigger(self): return [ StartupTrigger(), TimerTrigger(self._cronspec) ] def execute(self, event): for item_name, bean_name, attr_path, transform in self._item_specs: try: bean = jmx_beans.get(bean_name)