import os

from java.lang.management import ManagementFactory

# Jython helper from a Hadoop enabler.  `proxy`, `ContainerUtils`,
# `runCommand`, `getStatistic_disk`, and `decommissionqueue_dir` are
# supplied at module level by the enabler runtime.
def getStatistic_common(name):

    memoryBean = ManagementFactory.getMemoryMXBean()

    if name == "enabler_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    elif name == "enabler_NON_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getNonHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    
    elif name == "enabler_DATANODE_DECOMMISION_REQUESTS":
        
        hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()        

        if ContainerUtils.isWindows():
            raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
        else:
            commandline = "sh " + os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -count " + decommissionqueue_dir
            
        output = runCommand(commandline, expectedReturnCodes=[0, 255], suppressOutput=True)
        if output[0] == 0:
            # `hadoop fs -count` prints: DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME;
            # the second field is the number of pending decommission requests.
            stdout = str(output[1])
            return int(stdout.split()[1])
        elif output[0] == 255:
            # The decommission request directory does not exist.  It is not
            # expected to exist until some datanode posts the first request.
            return 0
        else:
            ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) +
                                                    "] while retrieving the enabler_DATANODE_DECOMMISION_REQUESTS statistic.  Assuming 0.")
            print output
            return 0
        
    elif name.startswith('enabler_DISK_'):

        tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()

        # getStatistic_disk() returns (available, used, percent-used) for the
        # filesystem holding tmpdir, with sizes in 1024-byte blocks.
        if name == "enabler_DISK_SPACE_FREE":
            blocks = int(getStatistic_disk(tmpdir)[0])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
        elif name == "enabler_DISK_SPACE_USED":
            blocks = int(getStatistic_disk(tmpdir)[1])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
        elif name == "enabler_DISK_SPACE_USED_PERCENT":
            return getStatistic_disk(tmpdir)[2]
        else:
            raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")

    else:
        raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
Example #2
import openhab

from java.lang.management import ManagementFactory

jmx_beans = {
    'Threads': ManagementFactory.getThreadMXBean(),
    'Memory': ManagementFactory.getMemoryMXBean(),
}

# Also expose each memory pool MXBean (e.g. 'PS Old Gen') under its pool name.
for bean in ManagementFactory.getMemoryPoolMXBeans():
    jmx_beans[bean.name] = bean

@openhab.rule
class JmxBinding(object):
  # All items are bound in one rule to avoid numerous timers.  If some items
  # should be updated at different frequencies, multiple rules can be
  # instantiated.
  def __init__(self, item_specs, cronspec="0 * * * * ?"):
    self._item_specs = item_specs
    self._cronspec = cronspec

  def getEventTrigger(self):
    return [ 
        StartupTrigger(),
        TimerTrigger(self._cronspec) 
    ]

  def execute(self, event):
    for item_name, bean_name, attr_path, transform in self._item_specs:
      try:
        bean = jmx_beans.get(bean_name)
        # Walk the dotted attribute path on the bean,
        # e.g. 'heapMemoryUsage.used' on the 'Memory' bean.
        value = bean
        for attr in attr_path.split('.'):
          value = getattr(value, attr)
        # The source snippet is truncated here; posting the result back to
        # the item is assumed to go through openhab.postUpdate -- adjust to
        # whatever update call your helper library provides.
        openhab.postUpdate(item_name, transform(value))
      except Exception, e:
        # Skip bad specs rather than aborting the whole rule.
        print 'JmxBinding: failed to update %s: %s' % (item_name, e)
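
# Usage sketch with hypothetical item names; each spec is
# (item_name, bean_name, dotted_attr_path, transform):
JmxBinding([
    ('JvmHeapUsedMiB', 'Memory', 'heapMemoryUsage.used',
     lambda v: v / (1024 * 1024)),                          # bytes -> MiB
    ('JvmLiveThreads', 'Threads', 'threadCount', lambda v: v),
])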