Example #1
def cpuinfo():
    """
    :return cpu information of the system
    :return {"output": <dict>, "error": error}
    """
    LOG.info("Getting cpu info")
    try:
        percent_per_cpu = psutil.cpu_percent(percpu=True)
        avg_cpu_percent = psutil.cpu_percent()
        no_of_cpu = psutil.NUM_CPUS
        cpu_time = psutil.cpu_times()
        output = {
            "no_of_cpu": no_of_cpu,
            "cpu_percent": avg_cpu_percent,
            "percent_per_cpu": percent_per_cpu,
            "cputimes": {
                "user": cpu_time.user,
                "nice": cpu_time.nice,
                "system": cpu_time.system,
                "idle": cpu_time.idle,
                "iowait": cpu_time.iowait,
                "irq": cpu_time.irq,
                "softirq": cpu_time.softirq
            }
        }
        return {"output": output, "error": ""}
    except Exception as e:
        LOG.exception(e)
        return {"output": {}, "error": str(e)}
Example #2
def _do_collect(interested_ps_names):
    logging.info("start collecting stats")

    cpu_usage = psutil.cpu_percent(interval=0.5, percpu=False)
    cpu_usage = "sys_cpu=%s" % cpu_usage

    per_cpu_usage = psutil.cpu_percent(interval=0.5, percpu=True)
    per_cpu_usage = " ".join(("sys_cpu%d=%s" % (i, usage)
                              for i, usage in enumerate(per_cpu_usage)))

    mem_usage = psutil.virtual_memory()
    mem_usage_gen = ((t, getattr(mem_usage, t)) for t in mem_usage._fields)
    mem_usage = " ".join("sys_mem_%s=%s" % (t, v) for t, v in mem_usage_gen)

    top_mem_ps, top_cpu_ps, interested_ps = _get_process_stats(
                                                   5, interested_ps_names)
    #fmt = "t_mem_name=%s t_mem_pid=%s t_mem_per=%s t_mem_rss=%s t_mem_vms=%s t_mem_cpu=%s"
    #top_proc_mem = _format_process_info(top_mem_ps, fmt)
    top_proc_mem = None

    #fmt = "t_cpu_name=%s t_cpu_pid=%s t_cpu_per=%s t_cpu_rss=%s t_cpu_vms=%s t_cpu_cpu=%s"
    #top_proc_cpu = _format_process_info(top_cpu_ps, fmt)
    top_proc_cpu = None

    interested_procs = ()
    if interested_ps:
        fmt = "i_ps_name=%s i_ps_pid=%s i_ps_per=%s i_ps_rss=%s i_ps_vms=%s i_ps_cpu=%s"
        interested_procs = _format_process_info(interested_ps, fmt)

    logging.info("end of collecting stats")

    return {"cpu_usage": cpu_usage, "per_cpu_usage": per_cpu_usage,
            "mem_usage": mem_usage, "top_proc_mem": top_proc_mem,
            "top_proc_cpu": top_proc_cpu,
            "interested_procs": interested_procs}
Example #3
def UpdateStats(cpu, memory, net_in, net_out):
  mem_history = []
  cpu_history = []
  net_in_history = []
  net_out_history = []
  buffer_len = 120
  window = buffer_len // 2  # integer so it can be used as a slice bound
  # to baseline the cpu call
  psutil.cpu_percent(interval=1)
  # TODO: remove outliers
  while True:
    vmem = psutil.virtual_memory()
    mem_history.append(vmem.percent / 100)
    mem_history = mem_history[-buffer_len:]
    l = sorted(mem_history, reverse=True)[:window]
    memory.value = sum(l) / len(l)
    cpu_history.append(psutil.cpu_percent(0) / 100)
    cpu_history = cpu_history[-buffer_len:]
    l = sorted(cpu_history, reverse=True)[:window]
    cpu.value = sum(l) / len(l)
    net = psutil.network_io_counters()
    net_in_history.append(net.bytes_recv)
    net_in_history = net_in_history[-buffer_len:]
    bytes = [y-x for x,y in pairwise(net_in_history)]
    if bytes:
      l = sorted(bytes, reverse=True)[:window]
      net_in.value = sum(l) / len(l)
    net_out_history.append(net.bytes_sent)
    net_out_history = net_out_history[-buffer_len:]
    bytes = [y-x for x,y in pairwise(net_out_history)]
    if bytes:
      l = sorted(bytes, reverse=True)[:window]
      net_out.value = sum(l) / len(l)
    time.sleep(10)
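This example (and Example #7 below) calls a pairwise() helper that is not shown, as well as psutil.network_io_counters(), which newer psutil releases expose only as net_io_counters(). A minimal sketch of a pairwise() helper, equivalent to the classic itertools recipe (itertools.pairwise is built in only from Python 3.10):

from itertools import tee

def pairwise(iterable):
    """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
    a, b = tee(iterable)
    next(b, None)          # advance the second iterator by one element
    return zip(a, b)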
Example #4
    def __set_bases(self):
        """
        Calculate base stats for I/O at start of the node.
        Used to calculate difference to get per/s stats.
        """
        psutil.cpu_percent()
        psutil.cpu_percent(percpu=True)

        for interface in psutil.net_io_counters(True):

            netint = psutil.net_io_counters(True)[interface]
            total_bytes = netint.bytes_recv + netint.bytes_sent
            total_packages = netint.packets_sent + netint.packets_recv

            self.__bandwidth_base[interface] = total_bytes
            self.__msg_freq_base[interface] = total_packages

        dev_names = []
        for disk in psutil.disk_partitions():
            if all(['cdrom' not in disk.opts, 'sr' not in disk.device]):
                dev_names.append(disk.device)

        for key in psutil.disk_io_counters(True):
            if key in dev_names:
                disk = psutil.disk_io_counters(True)[key]
                self.__disk_read_base[key] = disk.read_bytes
                self.__disk_write_base[key] = disk.write_bytes
Example #5
    def cpu_monitor(self, cpu_no=None, ivl=0.1):
        pre_cpu_per = 0
        while 1:
            if self.event.isSet():
                break
            if cpu_no is None:
                cpu_per = psutil.cpu_percent(interval=ivl)
            else:
                cpu_per = psutil.cpu_percent(interval=ivl, percpu=True)[cpu_no]

            print 'CPU {}% ---> {}%'.format(pre_cpu_per, cpu_per)
            pre_cpu_per = cpu_per

            pre_rest_time = self.rest_time
            if cpu_per > 20:
                self.rest_time = 100 if self.rest_time >= 100 else self.rest_time + 0.01
            elif cpu_per <= 20 and cpu_per > 10:
                self.rest_time = 0 if self.rest_time <= 0 else self.rest_time - 0.00005
            elif cpu_per <= 10 and cpu_per >= 0:
                self.rest_time = 0 if self.rest_time <= 0 else self.rest_time - 0.0001
            print 'SLT {} ---> {}'.format(pre_rest_time, self.rest_time)
            # print 'SleepTime = %s' % self.rest_time

            time.sleep(1)
            pass
Example #6
    def __init__(self, process_identifier, cpu_id, poll_interval=10):
        self.host = platform.node()
        self.cpu_id = cpu_id
        self.process_identifier = process_identifier
        self.monitor_cls_for = {mod.PROVIDER: getattr(
            mod, mod.SYNC_MONITOR_CLS) for mod in module_registry.values()
            if hasattr(mod, 'SYNC_MONITOR_CLS')}

        for p_name, p in providers.iteritems():
            if p_name not in self.monitor_cls_for:
                self.monitor_cls_for[p_name] = self.monitor_cls_for["generic"]

        self.log = get_logger()
        self.log.bind(cpu_id=cpu_id)
        self.log.info('starting mail sync process',
                      supported_providers=module_registry.keys())

        self.syncing_accounts = set()
        self.email_sync_monitors = {}
        self.contact_sync_monitors = {}
        self.event_sync_monitors = {}
        self.poll_interval = poll_interval
        self.semaphore = BoundedSemaphore(1)

        self.stealing_enabled = config.get('SYNC_STEAL_ACCOUNTS', True)
        self.zone = config.get('ZONE')
        self.queue_client = QueueClient(self.zone)

        # We call cpu_percent in a non-blocking way. Because of the way
        # this function works, it'll always return 0.0 the first time
        # we call it. See: https://pythonhosted.org/psutil/#psutil.cpu_percent
        # for more details.
        psutil.cpu_percent(percpu=True)
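As the comment above notes, cpu_percent() called without an interval compares CPU times against the previous call, so the very first non-blocking reading is a meaningless 0.0. A minimal sketch of the usual prime-then-poll pattern (interval and iteration count are arbitrary):

import time
import psutil

psutil.cpu_percent(percpu=True)          # prime: the first non-blocking call returns 0.0
for _ in range(3):
    time.sleep(5)                        # each reading now covers the window since the last call
    print(psutil.cpu_percent(percpu=True))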
Example #7
def UpdateStats(cpu, memory, net_in, net_out):
  mem_history = []
  cpu_history = []
  net_in_history = []
  net_out_history = []
  buffer_len = 120
  # to baseline the cpu call
  psutil.cpu_percent(interval=1)
  # TODO: remove outliers
  while True:
    vmem = psutil.virtual_memory()
    mem_history.append(vmem.percent / 100)
    mem_history = mem_history[-buffer_len:]
    memory.value = sum(mem_history) / len(mem_history)
    cpu_history.append(psutil.cpu_percent(0) / 100)
    cpu_history = cpu_history[-buffer_len:]
    cpu.value = sum(cpu_history) / len(cpu_history)
    net = psutil.network_io_counters()
    net_in_history.append(net.bytes_recv)
    net_in_history = net_in_history[-buffer_len:]
    bytes = [y-x for x,y in pairwise(net_in_history)]
    if bytes:
      net_in.value = sum(bytes) / len(bytes)
    net_out_history.append(net.bytes_sent)
    net_out_history = net_out_history[-buffer_len:]
    bytes = [y-x for x,y in pairwise(net_out_history)]
    if bytes:
      net_out.value = sum(bytes) / len(bytes)
    time.sleep(10)
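A simpler way to get a per-second throughput figure than keeping a history of counters is to difference two net_io_counters() samples taken a fixed interval apart; a minimal sketch (function name and one-second window are arbitrary):

import time
import psutil

def net_rates(interval=1.0):
    """Return (recv_bytes_per_sec, sent_bytes_per_sec) over `interval` seconds."""
    before = psutil.net_io_counters()
    time.sleep(interval)
    after = psutil.net_io_counters()
    return ((after.bytes_recv - before.bytes_recv) / interval,
            (after.bytes_sent - before.bytes_sent) / interval)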
Example #8
    def measure_status(self, event):
        """
        Collects information about the host's current status using psutils.
        Triggered periodically.
        """
        if not self.__is_enabled:
            pass
        if self.__is_enabled:
            self.__lock.acquire()
            # update node list
            self.__dict_lock.acquire()
            self.update_nodes()
            self.__dict_lock.release()
            # CPU
            self._status.add_cpu_usage(psutil.cpu_percent())
            self._status.add_cpu_usage_core(psutil.cpu_percent(percpu=True))
            # RAM
            self._status.add_ram_usage(psutil.virtual_memory().percent)
            # temp

            self.get_sensors()

            # Bandwidth and message frequency
            self.__measure_network_usage()
            # Disk usage
            self.__measure_disk_usage()
            self.__lock.release()
Example #9
def cpu_usage():
      psutil.cpu_percent(interval=1, percpu=False) #ignore first call - often returns 0
      time.sleep(0.12)
      cpu_load = psutil.cpu_percent(interval=1, percpu=False)
      #print "CPU Load: " + str(cpu_load)
      app.logger.info("CPU Load: " + str(cpu_load))
      return cpu_load  
Example #10
  def set_up_mocks(self, su=None):
    self.mox.StubOutWithMock(dirutil, 'safe_mkdtemp')
    dirutil.safe_mkdtemp().AndReturn('/tmp/test')
    self.mox.StubOutWithMock(log, 'init')
    log.init('/tmp/test/current_run').AndReturn(0)

    self.mox.StubOutWithMock(CommandUtil, 'execute_and_get_output')
    stub = CommandUtil.execute_and_get_output(['git','remote', '-v'])
    stub.AndReturn((0, dedent("""origin  https://git.twitter.biz/science (fetch)
    origin  https://git.twitter.biz/science (push)""")))
    stub2 = CommandUtil.execute_and_get_output(['git','rev-parse', '--abbrev-ref', 'HEAD'])
    stub2.AndReturn((0,"test_br"))

    self.mox.StubOutWithMock(psutil, 'cpu_percent')
    psutil.cpu_percent(interval=1).AndReturn(1.0)
    self.mox.StubOutWithMock(psutil, 'network_io_counters')
    psutil.network_io_counters().AndReturn("1000,10000,1000")
    self.mox.StubOutWithMock(psutil, 'NUM_CPUS')
    psutil.NUM_CPUS = 5

    self.mox.StubOutWithMock(socket, 'gethostname')
    socket.gethostname().AndReturn("localhost")
    self.mox.StubOutWithMock(socket, 'gethostbyname')
    socket.gethostbyname("localhost").AndReturn("localhost")

    self.mox.StubOutWithMock(sys, 'exit')
    sys.exit(0).AndReturn(0)
    self.mox.ReplayAll()
Example #11
def send_cpu(vm_name, vm_uuid):
    client = boto3.client('cloudwatch')
    while True:
        response = client.put_metric_data(
             Namespace='cloudstack',
             MetricData=[
                 {
                     'MetricName' : 'cpu',
                     'Dimensions' : [
                         {
                             'Name' : 'InstanceUUID',
                             'Value': vm_uuid
                         },
                         {
                             'Name' : 'InstanceName',
                             'Value': vm_name
                         },
                     ],
                     'Value': psutil.cpu_percent(),
                     'Unit' : 'Percent',
                 },
            ]     
         )
        print("Sent cpu %s" % psutil.cpu_percent())
        time.sleep(60)
Example #12
    def __init__(self, color, all_cpus=False, complete=True):
        Segment.__init__(self)
        
        self.set_icon('cpu', color)

        if all_cpus == True:
        #  CPU percentage per core:
            cpuloads = psutil.cpu_percent(interval=sleep_interval, percpu=True)
            for i, load in enumerate(cpuloads):
                rounded = str(round(load))
                if len(rounded) == 1:
                    cpuloads[i] = ' ' + rounded + '%'   # pad the value if only one digit
                else:
                    cpuloads[i] = rounded + '%'
                    
            # Compile the string that will be printed:
            output = ''
            for i, value in enumerate(cpuloads):
                output = output + 'CPU' + str(i) + ': ' + value + ' | '
            # remove the trailing ' | ' from last item:
            output = output[0:len(output)-2]

        else:
        #  CPU percentage overall:
            output = str(round(psutil.cpu_percent(interval=sleep_interval))) + '%'
            if len(output) == 2:
                output=' ' + output # pad the value if only one digit

        if complete == True:
            self.build_module(output, color)
        else:
            self.build_module(output, color, 'txtonly')
Example #13
def info():
    yellow=yellow_a[c[3]]

    ip = commands.getoutput("hostname -I")
    

    temp = commands.getoutput("/opt/vc/bin/vcgencmd measure_temp")
    try:

        junk,temp=split(temp,'/vcgencmd:')
    except:
        temp=str(temp)


    cpu=(psutil.cpu_percent())
    ram=(psutil.virtual_memory())
    print ram[2]
    ram= ram[2]
    ram=str(ram)



    i=0

    lab(ip,(yellow),50,60,True,i)
    lab(temp,(yellow),50,150,True,i)
    lab(str(psutil.cpu_percent()),(yellow),50,250,True,i)
    lab(ram,(yellow),50,350,True,i)
Example #14
    def __init__(self, color, all_cpus=False):
        Segment.__init__(self)

        self.set_icon('cpu', color)

        if all_cpus == True:
        #  CPU percentage per core:
            cpuloads = psutil.cpu_percent(interval=sleep_interval, percpu=True)

            output = ''
            for i, load in enumerate(cpuloads):
                rounded = str(round(load)) + '%'
                if len(rounded) == 2:
                    rounded = ' ' + rounded  # pad the value if only one digit
                output = output + 'CPU' + str(i) + ': ' + rounded + ' ' + self.icons.get('miniarrow') + ' '

            # remove the trailing ' | ' from last item:
            output = output[0:len(output)-3]

        else:
        #  CPU percentage overall:
            output = str(round(psutil.cpu_percent(interval=sleep_interval))) + '%'
            if len(output) == 2:
                output = ' ' + output # pad the value if only one digit

        self.build_module(output, color)
Example #15
    def start(self):
        psutil.cpu_percent()
        time.sleep(1)
        self.keep_running = True
        t = threading.Thread(target=self.finalize_tasks)
        t.daemon = True
        t.start()
        logger.info('Starting Task Server')
        logger.info("using code at: " + __file__)
        logger.info("Path to do_ Scripts : %s" % self.path_to_do_scripts)
        logger.info("Data_dir : %s" % self.data_dir)
        logger.info("Port : %s" % self.port)

        if self.sg.cluster_scheduler == 1:
            logger.info("Initilizing DRMAA interface to cluster scheduler")
            import drmaa
            self.drmaa_session = drmaa.Session()  # Start the interface session to DRMAA to control GridEngine
            self.drmaa_session.initialize()
        try:
            # Set up a thread that just updates the last check-in time for this server every 5 min
            timer_thread = threading.Thread(target=self.checkin_timer)
            timer_thread.daemon = True  # Make it a daemon so that when ctrl-c happens this thread goes away
            timer_thread.start()  # Start heartbeat
            self.serve_forever()  # Start the listener server
        finally:
            self.shutdown()
        return
Example #16
    def test_no_procfs_on_import(self, tb):
        my_procfs = tempfile.mkdtemp()

        with open(os.path.join(my_procfs, 'stat'), 'w') as f:
            f.write('cpu   0 0 0 0 0 0 0 0 0 0\n')
            f.write('cpu0  0 0 0 0 0 0 0 0 0 0\n')
            f.write('cpu1  0 0 0 0 0 0 0 0 0 0\n')

        try:
            orig_open = open

            def open_mock(name, *args, **kwargs):
                if name.startswith('/proc'):
                    raise IOError(errno.ENOENT, 'rejecting access for test')
                return orig_open(name, *args, **kwargs)

            patch_point = 'builtins.open' if PY3 else '__builtin__.open'
            with mock.patch(patch_point, side_effect=open_mock):
                importlib.reload(psutil)
                assert tb.called

                self.assertRaises(IOError, psutil.cpu_times)
                self.assertRaises(IOError, psutil.cpu_times, percpu=True)
                self.assertRaises(IOError, psutil.cpu_percent)
                self.assertRaises(IOError, psutil.cpu_percent, percpu=True)
                self.assertRaises(IOError, psutil.cpu_times_percent)
                self.assertRaises(
                    IOError, psutil.cpu_times_percent, percpu=True)

                psutil.PROCFS_PATH = my_procfs

                self.assertEqual(psutil.cpu_percent(), 0)
                self.assertEqual(sum(psutil.cpu_times_percent()), 0)

                # since we don't know the number of CPUs at import time,
                # we awkwardly say there are none until the second call
                per_cpu_percent = psutil.cpu_percent(percpu=True)
                self.assertEqual(sum(per_cpu_percent), 0)

                # ditto awkward length
                per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
                self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0)

                # much user, very busy
                with open(os.path.join(my_procfs, 'stat'), 'w') as f:
                    f.write('cpu   1 0 0 0 0 0 0 0 0 0\n')
                    f.write('cpu0  1 0 0 0 0 0 0 0 0 0\n')
                    f.write('cpu1  1 0 0 0 0 0 0 0 0 0\n')

                self.assertNotEqual(psutil.cpu_percent(), 0)
                self.assertNotEqual(
                    sum(psutil.cpu_percent(percpu=True)), 0)
                self.assertNotEqual(sum(psutil.cpu_times_percent()), 0)
                self.assertNotEqual(
                    sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0)
        finally:
            shutil.rmtree(my_procfs)
            importlib.reload(psutil)

        self.assertEqual(psutil.PROCFS_PATH, '/proc')
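The test above leans on psutil.PROCFS_PATH, which tells psutil (on Linux) where to find the proc filesystem. A minimal sketch of pointing it at an alternate mount, e.g. a host /proc bind-mounted into a container (the path is illustrative):

import psutil

psutil.PROCFS_PATH = "/host_proc"    # hypothetical alternate mount of the host's /proc
print(psutil.cpu_times())            # now read from /host_proc/stat instead of /proc/stat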
Example #17
 def starting(self, sender, **kwargs):
     
     psutil.cpu_times_percent()
     psutil.cpu_percent()
     _, _, my_id = self.vip.hello().get(timeout=3)
     self.vip.pubsub.publish(peer='pubsub', topic='/platform',
                             message='available')
Example #18
    def cpu_usage():
        cpu_usage_metrics = dict()
        cpu_usage_metrics['cpu_count'] = psutil.cpu_count(logical=True)
        cpu_usage_metrics['cpu_percent'] = psutil.cpu_percent(interval=1)
        cpu_usage_metrics['cpu_usage_each_core'] = psutil.cpu_percent(interval=1, percpu=True)

        return cpu_usage_metrics
Example #19
def evaluate_all(exp_params):
    job_ids = []
    for data_file in sorted(all_data_files(exp_params['data_dir'])):
        #data = split_into_folds(standardise_inputs(load_dictionary(data_file)))
        data_name = os.path.splitext(os.path.basename(data_file))[0]
        for method in exp_params['methods']:
            save_file_name = os.path.join(exp_params['save_dir'], method.description(), data_name + '.score')
            if exp_params['overwrite'] or (not os.path.isfile(save_file_name)):
                while psutil.cpu_percent() > exp_params['max_cpu_percent']:
                    time.sleep(10)
                print 'Running %s %s' % (data_name, method.description())
                if exp_params['multithread']:
                    print "This is wrong"
                    #job_ids.append(cloud.mp.call(evaluate_and_save, method, data_file, save_file_name, _max_runtime=exp_params['max_job_time']))
                else:
                    evaluate_and_save(method, True, data_file, save_file_name)
                #time.sleep(exp_params['sleep_time'])
            else:
                print 'Skipping %s %s' % (data_name, method.description()) 
        for prop in exp_params['properties']:
            save_file_name = os.path.join(exp_params['save_dir'], prop.description(), data_name + '.score')
            if exp_params['overwrite'] or (not os.path.isfile(save_file_name)):
                while psutil.cpu_percent() > exp_params['max_cpu_percent']:
                    time.sleep(10)
                print "Running %s %s" % (data_name, prop.description())
                evaluate_and_save(prop, False, data_file, save_file_name)
            else:
                print "Skipping %s %s" % (data_name, prop.description())
    if exp_params['multithread']:
        print 'Waiting for all jobs to complete'
        #cloud.mp.join(job_ids, ignore_errors=True)
    print 'Finished'
Example #20
 def update(self):
     self.data['cpu_percent'] = psutil.cpu_percent()
     self.data['cpu_percents'] = psutil.cpu_percent(percpu=True)
     self.data['memory'] = self._virtual_memory()
     self.data['swap'] = self._swap_memory()
     self.data['uptime'] = int(time.time() - self.data['boot_time'])
     super(Plugin, self).update()
Example #21
def get_cpu_node():
    cpu_count = RunnableNode('count', method=lambda: ([len(ps.cpu_percent(percpu=True))], 'cores'))
    cpu_percent = LazyNode('percent', method=lambda: (ps.cpu_percent(interval=0.5, percpu=True), '%'))
    cpu_user = RunnableNode('user', method=lambda: ([x.user for x in ps.cpu_times(percpu=True)], 'ms'))
    cpu_system = RunnableNode('system', method=lambda: ([x.system for x in ps.cpu_times(percpu=True)], 'ms'))
    cpu_idle = RunnableNode('idle', method=lambda: ([x.idle for x in ps.cpu_times(percpu=True)], 'ms'))
    return ParentNode('cpu', children=[cpu_count, cpu_system, cpu_percent, cpu_user, cpu_idle])
Example #22
def xhr_performance():
    global processSchedule
    global processList
    
    info = {}
    settings = {}
    
    if (processSchedule == None):
        logger.log("Process List SCHEDULE Job is Starting", 'INFO')
        SCHEDULE.add_interval_job(get_process_performance, seconds=5)
        processSchedule = 1
    
    #Get Memory Status and NetIO Status
    physicalMemory = psutil.virtual_memory()
    swapMemory = psutil.swap_memory()
    netio = psutil.net_io_counters(False)
    
    #Get settings
    settings['show_cpu_utilization'] = get_setting_value('show_cpu_utilization')
    settings['show_network_utilization'] = get_setting_value('show_network_utilization')
    settings['show_process_utilization'] = get_setting_value('show_process_utilization')
    
    #Get Memory Stats
    info['usedPhyMemory'] = convert_bytes(physicalMemory.used)
    info['availPhyMemory'] = convert_bytes(physicalMemory.free)
    info['totalPhyMemory'] = convert_bytes(physicalMemory.total)
    info['usedSwapMemory'] = convert_bytes(swapMemory.used)
    info['availSwapMemory'] = convert_bytes(swapMemory.free)
    info['totalSwapMemory'] = convert_bytes(swapMemory.total)
    
    #Display Network Status
    if (settings['show_network_utilization'] == '1'):
        info['bytesSent'] = convert_bytes(netio.bytes_sent)
        info['bytesSentRate'] = updateSentRate(netio.bytes_sent)
        info['bytesRecv'] = convert_bytes(netio.bytes_recv)
        info['bytesRecvRate'] = updateDownloadRate(netio.bytes_recv)
        info['packetSent'] = convert_bytes(netio.packets_sent).replace('B', '')
        info['packetRecv'] = convert_bytes(netio.packets_recv).replace('B', '')
        info['errin'] = netio.errin
        info['errout'] = netio.errout
    
    # must have some delay to prevent errors
    if (settings['show_cpu_utilization'] == '1'):
        i = 0
        cpuList = [ ]
        cpuPerCore = namedtuple('CPUPerCore', "index CPUpercentage")
        for item in psutil.cpu_percent(0.1, True):
            cpuList.append(cpuPerCore(index=i, CPUpercentage=item))
            i += 1
        info['totalCPUCol'] = i     #used for html format table
        info['cpuPercent'] = cpuList
        info['cpuOverall'] = psutil.cpu_percent(0.1, False)
        info['cpuTimes'] = psutil.cpu_times_percent(0.1, False)
    
    if (settings['show_process_utilization'] == '1'):
        info['processPerformance'] = processList
    
    # Render the template for our module
    return render_template('performance.html', result = info, settings = settings)
Example #23
 def test_sys_per_cpu_percent(self):
     last = psutil.cpu_percent(interval=0.001, percpu=True)
     self.assertEqual(len(last), psutil.cpu_count())
     for x in range(100):
         new = psutil.cpu_percent(interval=None, percpu=True)
         for percent in new:
             self._test_cpu_percent(percent, last, new)
         last = new
Example #24
def test_cpu():
    # giant hack incoming for some high cpu bug:
    import psutil
    import sys
    print psutil.cpu_percent() 
    if psutil.cpu_percent() > 50:
        print "cpu too high, exiting"
        sys.exit()
Example #25
 def __init__(self, name, init_config, agent_config):
     super(Cpu, self).__init__(name, init_config, agent_config)
     # psutil.cpu_percent and psutil.cpu_times_percent are called in
     # __init__ because the first time these two functions are called with
     # interval = 0.0 or None, it will return a meaningless 0.0 value
     # which you are supposed to ignore.
     psutil.cpu_percent(interval=None, percpu=False)
     psutil.cpu_times_percent(interval=None, percpu=False)
Example #26
 def test_system_cpu_percent(self):
     psutil.cpu_percent(interval=0.001)
     psutil.cpu_percent(interval=0.001)
     for x in xrange(1000):
         percent = psutil.cpu_percent(interval=None)
         self.assertTrue(isinstance(percent, float))
         self.assertTrue(percent >= 0.0)
         self.assertTrue(percent <= 100.0)
Example #27
 def test_cpu_percent(self):
     last = psutil.cpu_percent(interval=0.001)
     for x in range(100):
         new = psutil.cpu_percent(interval=None)
         self._test_cpu_percent(new, last, new)
         last = new
     with self.assertRaises(ValueError):
         psutil.cpu_percent(interval=-1)
Example #28
 def prepare_cpu(self):
     data = {}
     data['percent'] = psutil.cpu_percent(interval=0.1)
     data['cores'] = {}
     for cpu_num, percent in enumerate(psutil.cpu_percent(
             interval=0.1, percpu=True)):
         data['cores'][cpu_num] = percent
     return data
Example #29
def cpu_task(stat):
    cpu_percent = psutil.cpu_percent(interval=0, percpu=True)
    total_cpu = str(psutil.cpu_percent(interval=0))+'%';
    stat.add('Total CPU', total_cpu)
    for i in range(len(cpu_percent)):
        stat.add('CPU '+str(i), str(cpu_percent[i])+'%')

    stat.summary(DiagnosticStatus.OK, total_cpu)
Example #30
            prev_frame_capture_time_ms = frame_capture_time_ms
            frame_capture_time_ms = cap_time_tmp
            # Update the image counter
            frame_counter = frame_counter+1


            # Clear out the arrays which will hold the processed data
            curObservation.clear()

            # Process the image
            img_process(img)

            # Capture CPU metrics at a slower interval, we don't need to update these
            #  super often.
            if(frame_counter % 15 == 0):
                cpu_load_pct = psutil.cpu_percent()
                mem_load_pct = psutil.virtual_memory().percent

            # Calculate processing time
            proc_time_ms = millis() - frame_capture_time_ms
            # Calculate present FPS (capture and processing)
            fps_current = 1000/(frame_capture_time_ms - prev_frame_capture_time_ms)

            # Add the metadata to the present target observation data
            curObservation.setMetadata(frame_counter,proc_time_ms,cpu_load_pct,mem_load_pct,fps_current)

            # Transmit the vision processing results to the roboRIO
            outputDataServer.sendString(curObservation.toJsonString())
            indicateLEDsProcessingActive()

Example #31
    def get_proc_info(self, proc):
        '''
        Get information about the given process
        :return:
        '''
        try:
            procinfo = {}

            procinfo['id'] = proc.pid
            procinfo['name'] = proc.name()
            procinfo['num_threads'] = proc.num_threads()
            procinfo['num_handles'] = proc.num_handles()
            procinfo['threads'] = proc.threads()
            procinfo['connections'] = proc.connections()
            procinfo['memory_percent'] = proc.memory_percent()
            procinfo['memory_info'] = proc.memory_info()
            procinfo['cpu_affinity'] = proc.cpu_affinity()
            procinfo['cpu_times'] = proc.cpu_times()
            procinfo['p_cpu_percent'] = proc.cpu_percent(interval=self.proc_monitor_interval)
            procinfo['t_cpu_percent'] = psutil.cpu_percent(interval=self.proc_monitor_interval)
            procinfo['cpu_count_real'] = psutil.cpu_count(logical=False)  # physical cores
            procinfo['cpu_count_logical'] = psutil.cpu_count()            # logical cores

            cpu_count_real = procinfo['cpu_count_real']
            cpu_count_logical = procinfo['cpu_count_logical']
            p_cpu_percent = procinfo['p_cpu_percent']
            t_cpu_percent = procinfo['t_cpu_percent']
            return (True, p_cpu_percent, t_cpu_percent, cpu_count_real, cpu_count_logical)

        except Exception as e:
            print(e)
            return (False, 0, 0, 0, 0)

        def startup(self, exepath):
            """开启进程"""
            commands = []
            try:
                if os.path.exists(exepath):
                    p = psutil.Popen(commands, stdout=PIPE)
            except Exception as e:
                print(e)

        def termination(self, proc=None, pname=None, pid=None):
            '''Terminate a process'''
            try:
                if proc in self.all_process:
                    proc.terminate()
                    os.system("taskkill /PID %s" % proc.pid)
                    return True

                if pname:
                    for process in self.all_process:
                        if pname == process.name():
                            os.system("taskkill /PID %s", process.pid)
                            return True
                if pid:
                    for process in self.all_process:
                        if pid == process.pid:
                            os.system("taskkill /PID %s", pid)
                            return True
            except Exception as e:
                print('exception failed')
                return False

        def loop_controll(self):
            while 1:
                try:
                    # Get all processes listed in the config file
                    for process in self.proc_conf_list:
                        # Is the process alive?
                        if self.is_alive_proc(proc=process):
                            continue

                    # Restart the process if it has died
                    time.sleep(self.getprocinfospantime)
                except Exception as e:
                    print('loopControl.while : %s' % e)
Example #32
import psutil

# Gets percentage cpu usage
cpupercent = (psutil.cpu_percent(interval=1))

# Gets cpu average load (1 minute, 5 minute, 15 minute)
cpuloadavg = (psutil.getloadavg())

# 1024.0 ** 3 changes the total bytes as given by virtual_memory to gigabytes.
memory = round((psutil.virtual_memory().total / (1024.0**3)), 2)

for x in range(3):
    print("CPU Percentage: ", cpupercent)
    print("CPU load avg: ", cpuloadavg)

print("\n")

print(memory, "GB")
Example #33
        time.sleep(0.01)

    for i in range(0, 1):
        humidity = sense.get_humidity()
        #print("Humidity: %s %%H" % humidity)
        sense_pressure = sense.get_pressure()
        #print("Pressure: %s Millibars" % sense_pressure)
        convert_pressure = sense_pressure * 100
        #print("Pressure: %s Pascal" % convert_pressure)
        temp1 = sense.get_temperature_from_humidity()
        #print("Temperature1: %sC" % temp1)
        temp2 = sense.get_temperature_from_pressure()
        #print("Temperature2: %sC" % temp2)
        temp3 = ((temp1 + temp2) / 2)
        #print("Temperature3: %sC" % temp3)
        cpu_pc = psutil.cpu_percent()
        #print cpu_pc
        mem_avail_mb = psutil.avail_phymem() / 1000000
        #print mem_avail_mb
        cpu_temp = round(
            int(open('/sys/class/thermal/thermal_zone0/temp').read()) / 1e3, 1)
        #print cpu_temp

        if temp3 < 0:
            #print "cas 1"
            color = white
        elif temp3 >= 0 and temp3 < 14:
            #print "cas 2"
            color = blue
        elif temp3 >= 14 and temp3 < 25:
            #print "cas 3"
Example #34
def GetPie():
    try:
        #cpu
        cpuCount = psutil.cpu_count(logical=False)  # physical CPU cores
        cpuPercent = psutil.cpu_percent(0.5)  # CPU usage percent
        cpufree = round(100 - cpuPercent, 2)  # idle CPU percent
        # memory
        m = psutil.virtual_memory()  # memory info
        memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)  # total memory (GB)
        memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)  # used memory (GB)
        memoryFree = round(memoryTotal - memoryUsed, 2)  # free memory (GB)

        # disk
        io = psutil.disk_partitions()
        if platform.system().upper() == 'WINDOWS':
            del io[-1]
        diskCount = len(io)
        diskTotal = 0  # total disk space (GB)
        diskUsed = 0  # used (GB)
        diskFree = 0  # free (GB)
        for i in io:
            try:
                # On Windows, accessing a just-inserted USB drive can raise a "device not ready" error
                o = psutil.disk_usage(i.mountpoint)
                diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0))
                diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0))
                diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0))
            except:
                pass
        resJson = []
        resJson.append({
            'ttl':
            'CPU状态',
            'subtext':
            str(cpuCount) + '核心',
            'keys': ['使用率', '空闲'],
            'json': [{
                'value': cpuPercent,
                'name': '使用率'
            }, {
                'value': cpufree,
                'name': '空闲'
            }],
            'pieBox':
            'echartsCPU',
            'suffix':
            '%'
        })
        resJson.append({
            'ttl':
            '内存状态',
            'subtext':
            '总内存' + str(memoryTotal) + 'G',
            'keys': ['已用', '剩余'],
            'json': [{
                'value': memoryUsed,
                'name': '已用'
            }, {
                'value': memoryFree,
                'name': '剩余'
            }],
            'pieBox':
            'echartsMemory',
            'suffix':
            'G'
        })
        resJson.append({
            'ttl':
            '磁盘状态',
            'subtext':
            str(diskCount) + '个分区.' + '共' + str(diskTotal) + 'G',
            'keys': ['已使用', '未使用'],
            'json': [{
                'value': diskUsed,
                'name': '已使用'
            }, {
                'value': diskFree,
                'name': '未使用'
            }],
            'pieBox':
            'echartsDisk',
            'suffix':
            'G'
        })
        # compute time since boot
        sd = (datetime.datetime.now() - datetime.datetime.fromtimestamp(
            psutil.boot_time())).seconds  # seconds between now and boot time
        m, s = divmod(sd, 60)  # m is minutes, remainder s is seconds
        h, m = divmod(m, 60)  # convert minutes to hours
        systim = "%02d小时%02d分钟" % (h, m)
        sysinfo = [
            '系统信息:' + platform.platform() + '-' + platform.architecture()[0]
        ]
        try:
            sysinfo.append(platform.uname().processor)
        except:
            pass
        sysinfo.append('已开机运行了' + systim)
    except Exception as e:
        return json.dumps({'resultCode': 1, 'result': str(e)})
    else:
        return json.dumps({
            'resultCode': 0,
            'result': resJson,
            'sysinfo': sysinfo
        })
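Note that the uptime calculation in the example above uses timedelta.seconds, which discards whole days; total_seconds() keeps them, as in this sketch:

import datetime
import psutil

uptime = datetime.datetime.now() - datetime.datetime.fromtimestamp(psutil.boot_time())
hours, rem = divmod(int(uptime.total_seconds()), 3600)
print("up %d hours %d minutes" % (hours, rem // 60))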
Example #35
import psutil
import time
import json
import http.client
import configparser

config = configparser.ConfigParser()
config.read('config.txt')
master_ip = config['CONFIGURATION']['MASTER_IP']
cluster_id = config['CONFIGURATION']['CLUSTER_ID']

while True:
    stat = {}
    stat['disk_usage'] = psutil.disk_usage('/').percent
    stat['memory_usage'] = psutil.virtual_memory().percent
    stat['cpu_usage'] = psutil.cpu_percent()
    netio1 = psutil.net_io_counters()
    time.sleep(1)
    netio2 = psutil.net_io_counters()
    stat['network_usage'] = (netio2.bytes_sent - netio1.bytes_sent) / 1000
    stat['cluster'] = cluster_id

    data = json.dumps(stat)
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain"
    }
    conn = http.client.HTTPConnection(master_ip)
    conn.request("POST", "/dyno/", data, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
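The loop above serializes the stats as JSON but posts them with a form-urlencoded Content-Type header; if the receiving endpoint actually expects JSON, declaring it is cleaner. A minimal sketch (same hypothetical /dyno/ endpoint):

import http.client
import json

def post_stats(master_ip, stat):
    conn = http.client.HTTPConnection(master_ip)
    conn.request("POST", "/dyno/", json.dumps(stat),
                 {"Content-type": "application/json", "Accept": "text/plain"})
    return conn.getresponse().status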
Example #36
    def __init__(self, document):
        # type: (couchdb.Document) -> None

        self.id = uuid.uuid4()
        self.user = USER.id

        if failure is True:
            # precondition management
            pass
        elif not isinstance(document, couchdb.Document):



            msg = {
                'event': 'DOC-CRTN',
                'id': str(self.id),
                'user': str(USER.id),
                'error': 'error',
                'memory': (psutil.virtual_memory()[2])*(.000001),
                'cpu': (psutil.cpu_percent()),
                'time': str(datetime.utcnow()),
                'label': 'document'
            }


            err.error(jsonify(msg))

        else:
            # if all exceptions passed

            try:
                self.ref = document



                msg = {
                    'event': 'DOC-CRTN',
                    'id': str(self.id),
                    'type': str(type(document)),
                    'address': str(document),
                    'user': str(USER.id),
                    'memory': (psutil.virtual_memory()[2])*(.000001),
                    'cpu': (psutil.cpu_percent()),
                    'time': str(datetime.utcnow()),
                    'label': 'document'
                }

                deb.debug(jsonify(msg))

            except Exception as e:
                # if any further exception occurs



                msg = {
                    'event': 'DOC-CRTN',
                    'id':  str(self.id),
                    'type':  str(type(document)),
                    'address':  str(document),
                    'user':  str(USER.id),
                    'error':  'error',
                    'memory': (psutil.virtual_memory()[2])*(.000001),
                    'cpu': (psutil.cpu_percent()),
                    'time': str(datetime.utcnow()),
                    'label': 'document'
                }



                err.error(jsonify(msg))
Example #37
    def __init__(self, f):
        # type: (file) -> None

        self.id = uuid.uuid4()
        self.user = USER.id

        if failure is True:
            # precondition management
            pass

        elif not isinstance(f, file):
            # if file not found


            msg = {
                'event': 'FIL-CRTN',
                'id': str(self.id),
                'user': str(USER.id),
                'error': 'FCE',
                'memory': (psutil.virtual_memory()[2])*(.000001),
                'cpu': (psutil.cpu_percent()),
                'time': str(datetime.utcnow()),
                'label': 'file'
            }

            err.error(jsonify(msg))

            elasticfile(msg['id'], msg['user'], 'InvalidFile', msg['memory'], msg['cpu'], msg['time'], msg['error'])

            ''' GDB part   this part is never executed
            props = ['SOURCE:\"' + msg['source'] + '\"', 'id:\"' + msg['id'] + '\"', 'type:\"' + msg['type'] + '\"',
                     'user:\"' + msg['user'] + '\"', 'error:\"' + msg['error'] + '\"', 'time:\"' + str(msg['time']) + '\"',
                     'memory:\"' + str(msg['memory']) + '\"', 'cpu:\"' + str(msg['cpu']) + '\"']
            propsQuery = ','.join(props)
            # print propsQuery
            query = 'create (n:File{' + propsQuery + '})'
            graph.run(query)
            '''

        else:
            # if all exceptions passed
            try:
                self.ref = f



                msg = {
                    'event': 'FIL-CRTN',
                    'id': str(self.id),
                    'type': str(type(f)),
                    'source': str(f.name),
                    'user': str(USER.id),
                    'memory': (psutil.virtual_memory()[2])*(.000001),
                    'cpu': (psutil.cpu_percent()),
                    'time': str(datetime.utcnow()),
                    'label': 'file',
                    'error': 'success'
                }

                deb.debug(jsonify(msg))

                elasticfile(msg['id'], msg['user'], msg['source'] ,msg['memory'], msg['cpu'], msg['time'], msg['error'])

                ''' GDB part '''
                props = ['SOURCE:\"' + msg['source'] + '\"', 'id:\"' + msg['id'] + '\"', 'type:\"' + msg['type'] + '\"', 'user:\"' + msg['user'] + '\"', 'error:\"null\"', 'time:\"' + str(msg['time']) + '\"',
                         'memory:\"' + str(msg['memory']) + '\"', 'cpu:\"' + str(msg['cpu']) + '\"', 'label:\"' + str(msg['label']) + '\"']
                propsQuery = ','.join(props)
                #print propsQuery
                query = 'create (n:File{' + propsQuery + '})'
                graph.run(query)




            except Exception as e:
                # if any further exception occurs



                msg = {
                    'event': 'FIL-CRTN',
                    'id': str(self.id),
                    'type': str(type(f)),
                    'source': str(f.name),
                    'user': str(USER.id),
                    'error': 'OE',
                    'memory': (psutil.virtual_memory()[2])*(.000001),
                    'cpu': (psutil.cpu_percent()),
                    'time': str(datetime.utcnow()),
                    'label': 'file'
                }

                err.error(jsonify(msg))

                elasticfile(msg['id'], msg['user'], msg['source'], msg['memory'], msg['cpu'], msg['time'], msg['error'])

                ''' GDB part '''
                props = ['SOURCE:\"' + msg['source'] + '\"', 'id:\"' + msg['id'] + '\"', 'type:\"' + msg['type'] + '\"',
                         'user:\"' + msg['user'] + '\"', 'error:\"' + msg['error'] + '\"', 'time:\"' + str(msg['time']) + '\"',
                         'cpu:\"' + str(msg['cpu']) + '\"', 'memory:\"' + str(msg['memory']) + '\"', 'label:\"' + str(msg['label']) + '\"']
                propsQuery = ','.join(props)
                # print propsQuery
                query = 'create (n:File{' + propsQuery + '})'
                graph.run(query)
Example #38
import sqlite3
import datetime
import time
import psutil

while True:
    #get current cpu percentage
    currentcpu = psutil.cpu_percent(interval=1)
    #get current time
    currenttime = datetime.datetime.now().time()

    #create the connection to the database
    conn = sqlite3.connect('testdata.db')
    print "Opened database successfully"

    #add the values into the database, use str() to turn the values into strings
    conn.execute("INSERT INTO CPU (TIME,CPUUSE) VALUES ('" + str(currenttime) +
                 "','" + str(currentcpu) + "')")

    #commit the data to the databse
    conn.commit()
    print "Records created successfully"

    #close the connection
    conn.close()
    #wait ten seconds
    time.sleep(10)
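Building the INSERT by string concatenation works here but is fragile; sqlite3 supports parameter binding, as in this sketch of the same insert (assuming the same CPU(TIME, CPUUSE) table):

import datetime
import sqlite3
import psutil

conn = sqlite3.connect('testdata.db')
conn.execute("INSERT INTO CPU (TIME, CPUUSE) VALUES (?, ?)",
             (str(datetime.datetime.now().time()), psutil.cpu_percent(interval=1)))
conn.commit()
conn.close()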
Example #39
def get_diag():
    embed = discord.Embed(title="UWU Bot Diagnostics",color=0xff0000)
    embed.add_field(name="CPU",value=str(psutil.cpu_percent()) + "%")
    embed.add_field(name="RAM",value=str(psutil.virtual_memory()[2]) + "%")
    return embed
Example #40
    def _machine_stats(self):
        """
        :return: machine stats dictionary, all values expressed in megabytes
        """
        cpu_usage = [float(v) for v in psutil.cpu_percent(percpu=True)]
        stats = {
            "cpu_usage": sum(cpu_usage) / float(len(cpu_usage)),
        }

        bytes_per_megabyte = 1024**2

        def bytes_to_megabytes(x):
            return x / bytes_per_megabyte

        virtual_memory = psutil.virtual_memory()
        stats["memory_used_gb"] = bytes_to_megabytes(
            virtual_memory.used) / 1024
        stats["memory_free_gb"] = bytes_to_megabytes(
            virtual_memory.available) / 1024
        disk_use_percentage = psutil.disk_usage(Text(Path.home())).percent
        stats["disk_free_percent"] = 100.0 - disk_use_percentage
        with warnings.catch_warnings():
            if logging.root.level > logging.DEBUG:  # If the logging level is bigger than debug, ignore
                # psutil.sensors_temperatures warnings
                warnings.simplefilter("ignore", category=RuntimeWarning)
            sensor_stat = (psutil.sensors_temperatures() if hasattr(
                psutil, "sensors_temperatures") else {})
        if "coretemp" in sensor_stat and len(sensor_stat["coretemp"]):
            stats["cpu_temperature"] = max(
                [float(t.current) for t in sensor_stat["coretemp"]])

        # update cached measurements
        net_stats = psutil.net_io_counters()
        stats["network_tx_mbs"] = bytes_to_megabytes(net_stats.bytes_sent)
        stats["network_rx_mbs"] = bytes_to_megabytes(net_stats.bytes_recv)
        io_stats = psutil.disk_io_counters()
        stats["io_read_mbs"] = bytes_to_megabytes(io_stats.read_bytes)
        stats["io_write_mbs"] = bytes_to_megabytes(io_stats.write_bytes)

        # check if we can access the gpu statistics
        if self._gpustat:
            try:
                gpu_stat = self._gpustat.new_query()
                for i, g in enumerate(gpu_stat.gpus):
                    # only monitor the active gpu's, if none were selected, monitor everything
                    if self._active_gpus and i not in self._active_gpus:
                        continue
                    stats["gpu_%d_temperature" % i] = float(
                        g["temperature.gpu"])
                    stats["gpu_%d_utilization" % i] = float(
                        g["utilization.gpu"])
                    stats["gpu_%d_mem_usage" % i] = 100. * float(
                        g["memory.used"]) / float(g["memory.total"])
                    # already in MBs
                    stats["gpu_%d_mem_free_gb" %
                          i] = float(g["memory.total"] -
                                     g["memory.used"]) / 1024
                    stats["gpu_%d_mem_used_gb" %
                          i] = float(g["memory.used"]) / 1024
            except Exception:
                # something happened and we can't use gpu stats,
                self._gpustat_fail += 1
                if self._gpustat_fail >= 3:
                    self._task.get_logger().report_text(
                        'TRAINS Monitor: GPU monitoring failed getting GPU reading, '
                        'switching off GPU monitoring')
                    self._gpustat = None

        return stats
Example #41
def cpu():
    usage = str(psutil.cpu_percent())
    speak("CPU is at "+usage)
    battery = psutil.sensors_battery()
    speak("Battery is at ")
    speak(battery.percent)
Example #42
def get_cpu():
	return psutil.cpu_percent(interval=INTERVAL)
Example #43
def main():
    # A couple of "Uber Elements" that combine several elements and enable bulk edits
    def Txt(text, **kwargs):
        return (sg.Text(text, font=('Helvetica 8'), **kwargs))

    def GraphColumn(name, key):
        col = sg.Column([[
            Txt(name, key=key + '_TXT_'),
        ],
                         [
                             sg.Graph((GRAPH_WIDTH, GRAPH_HEIGHT), (0, 0),
                                      (GRAPH_WIDTH, 100),
                                      background_color='black',
                                      key=key + '_GRAPH_')
                         ]],
                        pad=(2, 2))
        return col

    num_cores = len(
        psutil.cpu_percent(percpu=True))  # get the number of cores in the CPU

    sg.ChangeLookAndFeel('Black')
    sg.SetOptions(element_padding=(0, 0), margins=(1, 1), border_width=0)

    # ----------------  Create Layout  ----------------
    layout = [[
        sg.RButton('',
                   image_data=red_x,
                   button_color=('black', 'black'),
                   key='Exit',
                   tooltip='Closes window'),
        sg.Text('     CPU Core Usage')
    ]]

    # add on the graphs
    for rows in range(num_cores // NUM_COLS + 1):
        row = []
        for cols in range(min(num_cores - rows * NUM_COLS, NUM_COLS)):
            row.append(
                GraphColumn('CPU ' + str(rows * NUM_COLS + cols),
                            '_CPU_' + str(rows * NUM_COLS + cols)))
        layout.append(row)

    # ----------------  Create Window  ----------------
    window = sg.Window(
        'PSG System Dashboard',
        keep_on_top=True,
        auto_size_buttons=False,
        grab_anywhere=True,
        no_titlebar=True,
        default_button_element_size=(12, 1),
        return_keyboard_events=True,
        alpha_channel=TRANSPARENCY,
        use_default_focus=False,
    ).Layout(layout).Finalize()

    # setup graphs & initial values
    graphs = []
    for i in range(num_cores):
        graphs.append(
            DashGraph(window.FindElement('_CPU_' + str(i) + '_GRAPH_'),
                      window.FindElement('_CPU_' + str(i) + '_TXT_'), 0,
                      colors[i % 6]))

    # ----------------  main loop  ----------------
    while (True):
        # --------- Read and update window once every Polling Frequency --------
        event, values = window.Read(timeout=POLL_FREQUENCY)
        if event in (None, 'Exit'):  # Be nice and give an exit
            break
        # read CPU for each core
        stats = psutil.cpu_percent(percpu=True)
        # Update each graph
        for i in range(num_cores):
            graphs[i].graph_percentage_abs(stats[i])
            graphs[i].text_display('{} CPU {:2.0f}'.format(i, stats[i]))
Example #44
import psutil
cpu_utilization = psutil.cpu_percent(1, False)
print('CPU_UTILIZATION:', cpu_utilization)
Example #45
def send_cpu_usage(*args):
    cu = psutil.cpu_percent()
    cf = psutil.cpu_freq()[0]
    response = {'usage': cu, 'freq': cf}
    socketIO.emit('cpu_usage_response', response)
Example #46
class SystemInfo(object):
    def get_size(bytes, suffix="B"):
        """
        Scale bytes to its proper format
        e.g:
            1253656 => '1.20MB'
            1253656678 => '1.17GB'
        """
        factor = 1024
        for unit in ["", "K", "M", "G", "T", "P"]:
            if bytes < factor:
                return f"{bytes:.2f}{unit}{suffix}"
            bytes /= factor

    #System Information
    print("=" * 40, "System Information", "=" * 40)
    uname = platform.uname()
    print(f"System: {uname.system}")
    print(f"Node Name: {uname.node}")
    print(f"Release: {uname.release}")
    print(f"Version: {uname.version}")
    print(f"Machine: {uname.machine}")
    print(f"Processor: {uname.processor}")

    # Boot Time
    print("=" * 40, "Boot Time", "=" * 40)
    boot_time_timestamp = psutil.boot_time()
    bt = datetime.fromtimestamp(boot_time_timestamp)
    print(
        f"Boot Time: {bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}"
    )

    ## CPU Information
    # let's print CPU information
    print("=" * 40, "CPU Info", "=" * 40)
    # number of cores
    print("Physical cores:", psutil.cpu_count(logical=False))
    print("Total cores:", psutil.cpu_count(logical=True))
    # CPU frequencies
    cpufreq = psutil.cpu_freq()
    print(f"Max Frequency: {cpufreq.max:.2f}Mhz")
    print(f"Min Frequency: {cpufreq.min:.2f}Mhz")
    print(f"Current Frequency: {cpufreq.current:.2f}Mhz")
    # CPU usage
    print("CPU Usage Per Core:")
    for i, percentage in enumerate(psutil.cpu_percent(percpu=True,
                                                      interval=1)):
        print(f"Core {i}: {percentage}%")
    print(f"Total CPU Usage: {psutil.cpu_percent()}%")

    ## Memory Usage
    # Memory Information
    print("=" * 40, "Memory Information", "=" * 40)
    # get the memory details
    svmem = psutil.virtual_memory()
    print(f"Total: {get_size(svmem.total)}")
    print(f"Available: {get_size(svmem.available)}")
    print(f"Used: {get_size(svmem.used)}")
    print(f"Percentage: {svmem.percent}%")
    print("=" * 20, "SWAP", "=" * 20)
    # get the swap memory details (if exists)
    swap = psutil.swap_memory()
    print(f"Total: {get_size(swap.total)}")
    print(f"Free: {get_size(swap.free)}")
    print(f"Used: {get_size(swap.used)}")
    print(f"Percentage: {swap.percent}%")

    # Disk Information
    print("=" * 40, "Disk Information", "=" * 40)
    print("Partitions and Usage:")
    # get all disk partitions
    partitions = psutil.disk_partitions()
    for partition in partitions:
        print(f"=== Device: {partition.device} ===")
        print(f"  Mountpoint: {partition.mountpoint}")
        print(f"  File system type: {partition.fstype}")
        try:
            partition_usage = psutil.disk_usage(partition.mountpoint)
        except PermissionError:
            # this can happen when the disk isn't ready
            continue
        print(f"  Total Size: {get_size(partition_usage.total)}")
        print(f"  Used: {get_size(partition_usage.used)}")
        print(f"  Free: {get_size(partition_usage.free)}")
        print(f"  Percentage: {partition_usage.percent}%")
    # get IO statistics since boot
    disk_io = psutil.disk_io_counters()
    print(f"Total read: {get_size(disk_io.read_bytes)}")
    print(f"Total write: {get_size(disk_io.write_bytes)}")

    # Network information
    print("=" * 40, "Network Information", "=" * 40)
    # get all network interfaces (virtual and physical)
    if_addrs = psutil.net_if_addrs()
    for interface_name, interface_addresses in if_addrs.items():
        for address in interface_addresses:
            print(f"=== Interface: {interface_name} ===")
            if str(address.family) == 'AddressFamily.AF_INET':
                print(f"  IP Address: {address.address}")
                print(f"  Netmask: {address.netmask}")
                print(f"  Broadcast IP: {address.broadcast}")
            elif str(address.family) == 'AddressFamily.AF_PACKET':
                print(f"  MAC Address: {address.address}")
                print(f"  Netmask: {address.netmask}")
                print(f"  Broadcast MAC: {address.broadcast}")
    # get IO statistics since boot
    net_io = psutil.net_io_counters()
    print(f"Total Bytes Sent: {get_size(net_io.bytes_sent)}")
    print(f"Total Bytes Received: {get_size(net_io.bytes_recv)}")
    system_df = pd.DataFrame(
        index=[uname.system, uname.node, uname.machine, uname.processor])

    print(system_df.head())
Example #47
    def __init__(self, name: str, Delta: float, Lambda: float, Omega: float,
                 instances: Instances, nodestack,
                 blacklister: Blacklister, nodeInfo: Dict,
                 notifierEventTriggeringConfig: Dict,
                 pluginPaths: Iterable[str] = None,
                 notifierEventsEnabled: bool = True):
        self.name = name
        self.instances = instances
        self.nodestack = nodestack
        self.blacklister = blacklister
        self.nodeInfo = nodeInfo
        self.notifierEventTriggeringConfig = notifierEventTriggeringConfig
        self.notifierEventsEnabled = notifierEventsEnabled

        self.Delta = Delta
        self.Lambda = Lambda
        self.Omega = Omega
        self.statsConsumers = self.getPluginsByType(pluginPaths,
                                                    PLUGIN_TYPE_STATS_CONSUMER)

        self.config = getConfig()

        # Number of ordered requests by each replica. The value at index `i` in
        # the list is a tuple of the number of ordered requests by replica and
        # the time taken to order those requests by the replica of the `i`th
        # protocol instance
        self.numOrderedRequests = []  # type: List[Tuple[int, int]]

        # Utility object for tracking requests order start and end
        # TODO: Has very similar cleanup logic to propagator.Requests
        self.requestTracker = RequestTimeTracker(instances.count)

        # Request latencies for the master protocol instances. Key of the
        # dictionary is a tuple of client id and request id and the value is
        # the time the master instance took for ordering it
        self.masterReqLatencies = {}  # type: Dict[Tuple[str, int], float]

        # Indicates that request latency in previous snapshot of master req
        # latencies was too high
        self.masterReqLatencyTooHigh = False

        # Request latency(time taken to be ordered) for the client. The value
        # at index `i` in the list is the dictionary where the key of the
        # dictionary is the client id and the value is a tuple of number of
        # requests and average time taken by that number of requests for the
        # `i`th protocol instance
        self.clientAvgReqLatencies = []  # type: List[Dict[str, Tuple[int, float]]]

        # TODO: Set this if this monitor belongs to a node which has primary
        # of master. Will be used to set `totalRequests`
        self.hasMasterPrimary = None

        # Total requests that have been ordered since the node started
        self.totalRequests = 0

        self.started = datetime.utcnow().isoformat()

        # Times of requests ordered by master in last
        # `ThroughputWindowSize` seconds. `ThroughputWindowSize` is
        # defined in config
        self.orderedRequestsInLast = []

        # Times and latencies (as a tuple) of requests ordered by master in last
        # `LatencyWindowSize` seconds. `LatencyWindowSize` is
        # defined in config
        self.latenciesByMasterInLast = []

        # Times and latencies (as a tuple) of requests ordered by backups in last
        # `LatencyWindowSize` seconds. `LatencyWindowSize` is
        # defined in config. Dictionary where key corresponds to instance id and
        #  value is a tuple of ordering time and latency of a request
        self.latenciesByBackupsInLast = {}

        # attention: handlers will work over unordered request only once
        self.unordered_requests_handlers = []  # type: List[Callable]

        # Monitoring suspicious spikes in cluster throughput
        self.clusterThroughputSpikeMonitorData = {
            'value': 0,
            'cnt': 0,
            'accum': []
        }

        psutil.cpu_percent(interval=None)
        self.lastKnownTraffic = self.calculateTraffic()

        self.totalViewChanges = 0
        self._lastPostedViewChange = 0
        HasActionQueue.__init__(self)

        if self.config.SendMonitorStats:
            self.startRepeating(self.sendPeriodicStats,
                                self.config.DashboardUpdateFreq)

        self.startRepeating(
            self.checkPerformance,
            self.config.notifierEventTriggeringConfig['clusterThroughputSpike']['freq'])

        self.startRepeating(self.check_unordered, self.config.UnorderedCheckFreq)

        if 'disable_view_change' in self.config.unsafe:
            self.isMasterDegraded = lambda: False
        if 'disable_monitor' in self.config.unsafe:
            self.requestOrdered = lambda *args, **kwargs: {}
            self.sendPeriodicStats = lambda: None
            self.checkPerformance = lambda: None
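The psutil.cpu_percent(interval=None) call near the end of this constructor primes psutil's internal counters: the very first non-blocking call returns a meaningless 0.0, and each later call reports usage since the previous one. A minimal sketch of that pattern:

import time

import psutil

psutil.cpu_percent(interval=None)  # priming call; the first reading is meaningless (0.0)
while True:
    time.sleep(5)
    # non-blocking: reports average CPU usage since the previous call
    print("CPU since last sample: %.1f%%" % psutil.cpu_percent(interval=None))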
Example #48
0
    async def stats(self, ctx):
        _("""Statistics on the bot.""")
        async with self.bot.pool.acquire() as conn:
            characters = await conn.fetchval("SELECT COUNT(*) FROM profile;")
            items = await conn.fetchval("SELECT COUNT(*) FROM allitems;")
            pg_version = conn.get_server_version()
        pg_version = f"{pg_version.major}.{pg_version.micro} {pg_version.releaselevel}"
        d0 = self.bot.user.created_at
        d1 = datetime.datetime.now()
        delta = d1 - d0
        myhours = delta.days * 1.5
        sysinfo = distro.linux_distribution()
        if self.bot.owner_ids:
            owner = " and ".join(
                [str(await self.bot.get_user_global(u)) for u in self.bot.owner_ids]
            )
        else:
            owner = str(await self.bot.get_user_global(self.bot.owner_id))
        guild_count = sum(
            await self.bot.cogs["Sharding"].handler("guild_count", self.bot.shard_count)
        )
        meminfo = psutil.virtual_memory()

        embed = discord.Embed(
            title=_("IdleRPG Statistics"),
            colour=0xB8BBFF,
            url=self.bot.BASE_URL,
            description=_("Official Support Server Invite: https://discord.gg/MSBatf6"),
        )
        embed.set_thumbnail(url=self.bot.user.avatar_url)
        embed.set_footer(
            text=f"IdleRPG {self.bot.version} | By {owner}",
            icon_url=self.bot.user.avatar_url,
        )
        embed.add_field(
            name=_("Hosting Statistics"),
            value=_(
                """\
CPU Usage: **{cpu}%**, **{cores}** cores @ **{freq}** GHz
RAM Usage: **{ram}%** (Total: **{total_ram}**)
Python Version **{python}** <:python:445247273065250817>
discord.py Version **{dpy}**
Operating System: **{osname} {osversion}**
Kernel Version: **{kernel}**
PostgreSQL Version **{pg_version}**"""
            ).format(
                cpu=psutil.cpu_percent(),
                cores=psutil.cpu_count(),
                freq=psutil.cpu_freq().max / 1000,
                ram=meminfo.percent,
                total_ram=humanize.naturalsize(meminfo.total),
                python=platform.python_version(),
                dpy=pkg.get_distribution("discord.py").version,
                osname=sysinfo[0].title(),
                osversion=sysinfo[1],
                kernel=os.uname().release,
                pg_version=pg_version,
            ),
            inline=False,
        )
        embed.add_field(
            name=_("Bot Statistics"),
            value=_(
                """\
Code lines written: **{lines}**
Shards: **{shards}**
Servers: **{guild_count}**
Characters: **{characters}**
Items: **{items}**
Average hours of work: **{hours}**"""
            ).format(
                lines=self.bot.linecount,
                shards=self.bot.shard_count,
                guild_count=guild_count,
                characters=characters,
                items=items,
                hours=myhours,
            ),
            inline=False,
        )
        await ctx.send(embed=embed)
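Stripped of the Discord specifics, the hosting statistics in the embed above come from a handful of psutil calls. A rough sketch collecting the same readings into a plain dictionary (cpu_freq() may be unavailable on some platforms, so it is guarded here):

import platform

import psutil

def hosting_stats():
    mem = psutil.virtual_memory()
    freq = psutil.cpu_freq()           # may be None on some platforms
    return {
        "cpu_percent": psutil.cpu_percent(),
        "cores": psutil.cpu_count(),
        "freq_ghz": freq.max / 1000 if freq else None,
        "ram_percent": mem.percent,
        "ram_total": mem.total,
        "python": platform.python_version(),
    }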
Example #49
0
import time
import json
import urllib, urllib2
import socket
import psutil
url = "http://localhost:5000/post"

while True:
    print(chr(27) + "[2J")

    cpu, disks, host_info, memory, network, processes, other = [],[],[],[],[],[],[]

    host_info.append(int(time.time()))
    host_info.append(socket.getfqdn())

    cpu.append(vars(psutil.cpu_times()))
    cpu.append(psutil.cpu_percent(percpu=True))

    memory.append(vars(psutil.virtual_memory()))
    memory.append(vars(psutil.swap_memory()))

    disks.append(vars(psutil.disk_io_counters()))

    if_addr = {}
    for k, v in psutil.net_if_addrs().iteritems():
        values = []
        for i in v:
            values.append(vars(i))
        if_addr[k] = values

    netio = {}
    for k, v in psutil.net_io_counters(pernic=True).iteritems():
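The excerpt breaks off while iterating psutil.net_io_counters(pernic=True). A hypothetical Python 3 continuation of the same idea, converting each per-NIC namedtuple with _asdict() rather than vars():

import json

import psutil

netio = {}
for nic, counters in psutil.net_io_counters(pernic=True).items():
    netio[nic] = counters._asdict()   # namedtuple -> plain dict
print(json.dumps(netio))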
Example #50
0
def check_cpu():
    """Report an error if CPU usage is over 80%"""
    if psutil.cpu_percent(interval=1) > 80:
        print("Error - CPU usage is over 80%")
        send_email("Error - CPU usage is over 80%")
Example #51
0
    def on_chat_message(self, msg):
        content_type, chat_type, chat_id = telepot.glance(msg)
        # Do your stuff according to `content_type` ...
        print("Your chat_id:" +
              str(chat_id))  # this will tell you your chat_id
        if chat_id in adminchatid:  # Store adminchatid variable in tokens.py
            if content_type == 'text':
                if msg['text'] == '/stats' and chat_id not in shellexecution:
                    bot.sendChatAction(chat_id, 'typing')
                    memory = psutil.virtual_memory()
                    disk = psutil.disk_usage('/')
                    boottime = datetime.fromtimestamp(psutil.boot_time())
                    now = datetime.now()
                    timedif = "在线时间: %.1f 小时" % ((
                        (now - boottime).total_seconds()) / 3600)
                    memtotal = "总内存: %.2f GB " % (memory.total / 1000000000)
                    memavail = "可用内存: %.2f GB" % (memory.available /
                                                  1000000000)
                    memuseperc = "使用内存: " + str(memory.percent) + " %"
                    diskused = "磁盘占用: " + str(disk.percent) + " %"
                    cpu = "当前CPU占用率" + str(
                        psutil.cpu_percent(interval=1)) + "%"
                    pids = psutil.pids()
                    pidsreply = ''
                    procs = {}
                    for pid in pids:
                        try:
                            p = psutil.Process(pid)
                            pmem = p.memory_percent()
                            if pmem > 0.5:
                                if p.name() in procs:
                                    procs[p.name()] += pmem
                                else:
                                    procs[p.name()] = pmem
                        except (psutil.NoSuchProcess, psutil.AccessDenied):
                            # the process may have exited or be inaccessible; skip it
                            continue
                    sortedprocs = sorted(procs.items(),
                                         key=operator.itemgetter(1),
                                         reverse=True)
                    for proc in sortedprocs:
                        pidsreply += proc[0] + " " + ("%.2f" %
                                                      proc[1]) + " %\n"
                    reply = timedif + "\n" + \
                            memtotal + "\n" + \
                            memavail + "\n" + \
                            memuseperc + "\n" + \
                            diskused + "\n" + \
                            cpu + "\n" + \
                            pidsreply
                    bot.sendMessage(chat_id,
                                    reply,
                                    disable_web_page_preview=True)
                elif msg['text'] == "help" or msg['text'] == "/help" or msg[
                        'text'] == "/start":
                    bot.sendMessage(chat_id, "以下命令可用")
                    bot.sendMessage(chat_id, "/stats -检查磁盘/CPU/内存使用情况")
                    bot.sendMessage(chat_id, "/shell -字面意思")
                    bot.sendMessage(chat_id, "/memgraph -绘制近一段时间的内存使用记录表")
                    bot.sendMessage(chat_id,
                                    "/setmem -设置内存占用告警阈值,并在占用情况高于这个值是告警")
                    bot.sendMessage(chat_id, "/setpoll -设置探测间隔(不少于10秒)")
                    bot.sendMessage(chat_id, "/stop -AZ5")
                elif msg['text'] == "Stop":
                    clearall(chat_id)
                    bot.sendMessage(chat_id,
                                    "所有操作已经停止了",
                                    reply_markup=hide_keyboard)
                elif msg['text'] == '/setpoll' and chat_id not in setpolling:
                    bot.sendChatAction(chat_id, 'typing')
                    setpolling.append(chat_id)
                    bot.sendMessage(chat_id,
                                    "发给我一个新的探测间隔(不少于10秒)",
                                    reply_markup=stopmarkup)
                elif chat_id in setpolling:
                    bot.sendChatAction(chat_id, 'typing')
                    try:
                        global poll
                        poll = int(msg['text'])
                        if poll > 10:
                            bot.sendMessage(chat_id, "整好了!")
                            clearall(chat_id)
                        else:
                            1 / 0
                    except:
                        bot.sendMessage(chat_id, "需要大于等于10秒,再来一次")
                elif msg['text'] == "/shell" and chat_id not in shellexecution:
                    bot.sendMessage(chat_id,
                                    "发给我一条命令以执行",
                                    reply_markup=stopmarkup)
                    shellexecution.append(chat_id)
                elif msg['text'] == "/setmem" and chat_id not in settingmemth:
                    bot.sendChatAction(chat_id, 'typing')
                    settingmemth.append(chat_id)
                    bot.sendMessage(chat_id,
                                    "发给我一个新的内存占用告警阈值?",
                                    reply_markup=stopmarkup)
                elif chat_id in settingmemth:
                    bot.sendChatAction(chat_id, 'typing')
                    try:
                        global memorythreshold
                        memorythreshold = int(msg['text'])
                        if memorythreshold < 100:
                            bot.sendMessage(chat_id, "整好了!")
                            clearall(chat_id)
                        else:
                            1 / 0
                    except:
                        bot.sendMessage(chat_id, "都说了要小于100啊")

                elif chat_id in shellexecution:
                    bot.sendChatAction(chat_id, 'typing')
                    p = Popen(msg['text'],
                              shell=True,
                              stdin=PIPE,
                              stdout=PIPE,
                              stderr=STDOUT,
                              close_fds=True)
                    output = p.stdout.read()
                    if output != b'':
                        bot.sendMessage(chat_id,
                                        output,
                                        disable_web_page_preview=True)
                    else:
                        bot.sendMessage(chat_id,
                                        "没有输出",
                                        disable_web_page_preview=True)
                elif msg['text'] == '/memgraph':
                    bot.sendChatAction(chat_id, 'typing')
                    tmperiod = "Last %.2f hours" % (
                        (datetime.now() - graphstart).total_seconds() / 3600)
                    bot.sendPhoto(chat_id,
                                  plotmemgraph(memlist, xaxis, tmperiod))
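The /stats handler above walks psutil.pids() and builds a Process per pid, which can race with processes exiting; psutil.process_iter() with preloaded attributes is a more robust way to build the same per-process memory summary. A rough equivalent sketch:

import operator

import psutil

def top_memory_processes(min_percent=0.5):
    procs = {}
    for p in psutil.process_iter(attrs=["name", "memory_percent"]):
        pmem = p.info["memory_percent"]   # None if access was denied
        if pmem and pmem > min_percent:
            procs[p.info["name"]] = procs.get(p.info["name"], 0.0) + pmem
    return sorted(procs.items(), key=operator.itemgetter(1), reverse=True)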
Example #52
0
 def __get_cpu(self):
     self.__cpu = dict()
     self.__cpu['count'] = psutil.cpu_count(logical=True)
     self.__cpu['percent'] = psutil.cpu_percent(interval=1, percpu=True)
     return self.__cpu
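Note that cpu_count(logical=True) above counts hyper-threads, while logical=False gives physical cores, and cpu_percent(interval=1, percpu=True) blocks for the full sampling interval. A small sketch of the distinction:

import psutil

print("logical CPUs:  ", psutil.cpu_count(logical=True))
print("physical cores:", psutil.cpu_count(logical=False))   # may be None if undetermined
print("per-CPU usage: ", psutil.cpu_percent(interval=1, percpu=True))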
Example #53
0
import logging
import socket
import time
import psutil

HOST = "127.0.0.1"  # The server's hostname or IP address
PORT = 65432  # The port used by the server
SLEEP_INTERVAL = 0.5

logging.basicConfig(level=logging.INFO)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    logging.info('Connecting: {} : {}'.format(HOST, PORT))
    s.connect((HOST, PORT))

    while True:
        logging.info("Getting system data...")
        cpu = psutil.cpu_percent(interval=None, percpu=True)

        mem = psutil.virtual_memory()
        swap = psutil.swap_memory()
        mem_used_pct = (mem.total - mem.available) * 100.0 / mem.total

        df = psutil.disk_usage("/")

        address = "Unknown"
        bytes_recv = 0
        bytes_sent = 0
        counters = []
        if "wlan0" in psutil.net_if_addrs():
            address = psutil.net_if_addrs()["wlan0"][0].address
            bytes_recv = psutil.net_io_counters(
                pernic=True)["wlan0"].bytes_recv
Example #54
0
import psutil
import subprocess

subprocess.call(["free", "-m"])
print("Cpu:" + str(psutil.cpu_percent(interval=1)))
subprocess.call(["df", "-h"])
Example #55
0
 def getCpuMemory(self):
     """Get CPU and memory status information"""
     cpuPercent = psutil.cpu_percent()
     memoryPercent = psutil.virtual_memory().percent
     return vtText.CPU_MEMORY_INFO.format(cpu=cpuPercent, memory=memoryPercent)
Example #56
0
                     data=values,
                     auth=('hello', 'nishil123'))


def setTmpState(val):
    values = {'name': val}
    r = requests.put('http://127.0.0.1:8000/tmp/1/',
                     data=values,
                     auth=('hello', 'nishil123'))


def setCpuState(val):
    values = {'name': val}
    r = requests.put('http://127.0.0.1:8000/Cpu/1/',
                     data=values,
                     auth=('hello', 'nishil123'))


while True:
    try:
        Cpu = psutil.cpu_percent()
        tmp = get_temperature()

        if Cpu is None or tmp is None:
            time.sleep(2)
            continue
        runController()
        time.sleep(10)
    except KeyboardInterrupt:
        exit()
Example #57
0
    def run(self, when = True, false_return = None):

        start_time = datetime.utcnow()

        if when is True:

            try:
                self.logStart()
                self.outgoing = self.body()

                ret_ids = []

                if isinstance(self.outgoing, collections.Iterable):
                    for i in self.outgoing:
                        if isinstance(i, Object) or isinstance(i, File) or isinstance(i, Document):
                            ret_ids.append(str(i.id))

                else:
                    if isinstance(self.outgoing, Object) or isinstance(self.outgoing, File) or isinstance(self.outgoing,
                                                                                                          Document):
                        ret_ids.append(str(self.outgoing.id))



                msg = {
                    'o@': ret_ids,
                    'event': 'BODY-TRU',
                    'id': str(self.id),
                    'name': str(self.__class__.__name__),
                    'user': str(USER.id),
                    'duration_run': str(datetime.utcnow()-start_time),
                    'memory_run': (psutil.virtual_memory()[2])*(.000001),
                    'cpu_run': (psutil.cpu_percent()),
                    'time_run': str(datetime.utcnow()),
                    'error': 'success'
                }

                deb.debug(jsonify(msg))

                elasticmodule(msg['id'], msg['time_run'], msg['name'], msg['user'], msg['memory_run'], 0,
                              msg['cpu_run'], 0, msg['duration_run'], "00:00:00.000000", msg['error'])

                ''' relationships '''
                for uninqid in ret_ids:
                    # match (n:Object{id:uniqid}), (m:Module{id:id})
                    # create (m)-[:OUT]-> (n)

                    query = ' match (n),(m) where n.id = \"' + uninqid + '\" and m.id = \"' + msg['id'] + '\" create (m)-[:OUT]-> (n) ' + 'set m.duration_run = \"' + str(msg['duration_run']) + '\"' + ', m.cpu_run = \"' + str(msg['cpu_run']) + '\"' + ', m.memory_run = \"' + str(msg['memory_run']) + '\"' + ', m.time_run = \"' + str(msg['time_run']) + '\"'
                    #print query
                    graph.run(query)

                self.logEnd()



                return self.outgoing


            except Exception as e:



                msg = {
                    'event': 'MOD-RUN',
                    'id': str(self.id),
                    'name': str(self.__class__.__name__),
                    'user': str(USER.id),
                    'error': 'MRE',
                    'duration_run': str(datetime.utcnow() - start_time),
                    'memory_run': (psutil.virtual_memory()[2])*(.000001),
                    'cpu_run': (psutil.cpu_percent()),
                    'time_run': str(datetime.utcnow()),
                    'label': 'module'
                }

                err.error(jsonify(msg))

                elasticmodule(msg['id'], msg['time_run'], msg['name'], msg['user'], msg['memory_run'], 0,
                              msg['cpu_run'], 0, msg['duration_run'], "00:00:00.000000", msg['error'])

                ''' GDB part '''
                #props = ['NAME:\"' + msg['name'] + '\"', 'id:\"' + msg['id'] + '\"',
                #         'user:\"' + msg['user'] + '\"', 'error:\"' + msg['error'] + '\"', 'time:\"' + str(msg['time']) + '\"']
                #propsQuery = ','.join(props)
                # print propsQuery

                query = 'match (n:Module{id:\"' + msg['id'] + '\"}) set n.error = \"' + str(msg['error']) + '\"' + ', n.duration_run = \"' + str(msg['duration_run']) + '\"' + ', n.cpu_run = \"' + str(msg['cpu_run']) + '\"' + ', n.memory_run = \"' + str(msg['memory_run']) + '\"' + ', n.time_run = \"' + str(msg['time_run']) + '\"' + ', n.label = \"' + str(msg['label']) + '\"'
                #print query
                graph.run(query)



        else:
            '''not using this part for implementation'''

            ret_ids = []

            for i in false_return:
                ret_ids.append(str(i))



            msg = {
                'o@': ret_ids,
                'event': 'BODY-FLS',
                'id': str(self.id),
                'name': str(self.__class__.__name__),
                'user': str(USER.id),
                'duration': str(datetime.utcnow() - start_time),
                'memory': (psutil.virtual_memory()[2])*(.000001),
                'cpu': (psutil.cpu_percent()),
                'time': str(datetime.utcnow()),
                'error': 'success'
            }


            deb.debug(jsonify(msg))


            return false_return
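The Cypher statements above are assembled by string concatenation. If graph is a py2neo Graph (an assumption; the excerpt never shows how it is constructed), the same updates could be written with query parameters, which avoids quoting and escaping problems. A hedged sketch, with illustrative names:

# assumes `graph` behaves like a py2neo Graph; names and fields are illustrative
def record_run(graph, module_id, object_ids, stats):
    graph.run(
        "MATCH (m:Module {id: $id}) "
        "SET m.duration_run = $duration, m.cpu_run = $cpu, "
        "    m.memory_run = $memory, m.time_run = $time",
        id=module_id, duration=stats["duration_run"], cpu=stats["cpu_run"],
        memory=stats["memory_run"], time=stats["time_run"])
    for oid in object_ids:
        graph.run(
            "MATCH (n {id: $oid}), (m:Module {id: $mid}) CREATE (m)-[:OUT]->(n)",
            oid=oid, mid=module_id)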
Example #58
0
    def __init__(self, reference):
        # type: (Any) -> None

        self.id = uuid.uuid4()
        self.user = USER.id

        if failure is True:
            # precondition management
            pass

        else:
            # if all preconditions passed
            try:
                self.ref = reference


                msg = {
                    'event': 'OB-CRTN',
                    'id': str(self.id),
                    'type': str(type(reference)),
                    'value': str(reference),
                    'user': str(USER.id),
                    'memory': (psutil.virtual_memory()[2])*(.000001),
                    'cpu': (psutil.cpu_percent()),
                    'time': str(datetime.utcnow()),
                    'label': 'object',
                    'error': 'success'
                }



                deb.debug(jsonify(msg))

                elasticobject(msg['id'], msg['type'], msg['value'], msg['user'], msg['memory'], msg['cpu'], msg['time'], msg['error'])

                ''' GDB part '''
                props = ['VALUE:\"' + msg['value'] + '\"', 'id:\"' + msg['id'] + '\"', 'type:\"' + msg['type'] + '\"', 'user:\"' + msg['user'] + '\"', 'error:\"null\"', 'time:\"' + str(msg['time']) + '\"', 'memory:\"' + str(msg['memory']) + '\"', 'cpu:\"' + str(msg['cpu']) + '\"', 'label:\"' + str(msg['label']) + '\"']
                propsQuery = ','.join(props)
                #print propsQuery
                query = 'create (n:Object{' + propsQuery + '})'
                graph.run(query)







            except Exception as e:
                # if any further error occurs somehow

                #pid = os.getpid()
                #py = psutil.Process(pid)
                #memoryUse = py.memory_info()[0] / 2. ** 30

                msg = {
                    'event': 'OB-CRTN',
                    'id': str(self.id),
                    'type': str(type(reference)),
                    'value': str(reference),
                    'user': str(USER.id),
                    'error': 'OCE',
                    'memory': (psutil.virtual_memory()[2])*(.000001),
                    'cpu': (psutil.cpu_percent()),
                    'time': str(datetime.utcnow()),
                    'label': 'object'
                }


                err.error(jsonify(msg))

                elasticobject(msg['id'], msg['type'], msg['value'], msg['user'], msg['memory'], msg['cpu'],
                               msg['time'], msg['error'])


                ''' GDB part '''
                props = ['VALUE:\"' + msg['value'] + '\"', 'id:\"' + msg['id'] + '\"', 'type:\"' + msg['type'] + '\"', 'user:\"' + msg['user'] + '\"', 'error:\"' + msg['error'] + '\"', 'time:\"' + str(msg['time']) + '\"', 'memory:\"' + str(msg['memory']) + '\"', 'cpu:\"' + str(msg['cpu']) + '\"', 'label:\"' + str(msg['label']) + '\"']
                propsQuery = ','.join(props)
                #print propsQuery
                query = 'create (n:Object{' + propsQuery + '})'
                graph.run(query)
Example #59
0
    def __init__(self, *args):

        start_time = datetime.utcnow()

        self.id = uuid.uuid4()
        self.user = USER.id

        self.P = args

        try:
            param_ids = []

            for i in args:

                if isinstance(i, Object) or isinstance(i, File) or isinstance(i, Document):
                    param_ids.append(str(i.id))

                else:
                    param_ids.append(str(i))



            msg = {
                'p@': param_ids,
                'event': 'MOD-CRTN',
                'id': str(self.id),
                'name': str(self.__class__.__name__),
                'user': str(USER.id),
                'memory_init': (psutil.virtual_memory()[2])*(.000001),
                'cpu_init': (psutil.cpu_percent()),
                'duration_init': str(datetime.utcnow()-start_time),
                'time': str(datetime.utcnow()),
                'cpu_run': 0,
                'memory_run': 0,
                'duration_run': "00:00:00.000000",
                'label': 'module',
                'error': 'success'
            }

            deb.debug(jsonify(msg))

            elasticmodule(msg['id'], msg['time'], msg['name'], msg['user'], msg['memory_run'], msg['memory_init'], msg['cpu_run'], msg['cpu_init'], msg['duration_run'], msg['duration_init'], msg['error'])

            ''' GDB part '''
            props = ['NAME:\"' + msg['name'] + '\"', 'id:\"' + msg['id'] + '\"',
                     'user:\"' + msg['user'] + '\"', 'error:\"null\"', 'time:\"' + str(msg['time']) + '\"', 'memory_init:\"' + str(msg['memory_init']) + '\"',
                     'cpu_init:\"' + str(msg['cpu_init']) + '\"', 'duration_init:\"' + str(msg['duration_init']) + '\"',
                     'duration_run:\"' + str(msg['duration_run']) + '\"', 'memory_run:\"' + str(msg['memory_run']) + '\"', 'cpu_run:\"' + str(msg['cpu_run']) + '\"',
                     'label:\"' + str(msg['label']) + '\"']
            propsQuery = ','.join(props)
            #print propsQuery
            query = 'create (n:Module{' + propsQuery + '})'
            graph.run(query)


            ''' relationships '''
            for uninqid in param_ids:
                # match (n:Object{id:uniqid}), (m:Module{id:id})
                # create (n)-[:IN]-> (m)

                query = ' match (n),(m) where n.id = \"' + uninqid + '\" and m.id = \"' + msg['id'] + '\" create (n)-[:IN]-> (m)'
                #print query
                graph.run(query)



        except Exception as e:

            param_ids = []

            for i in args:
                if isinstance(i, Object) or isinstance(i, File) or isinstance(i, Document):
                    param_ids.append(str(i.id))

                else:
                    param_ids.append(str(i))



            msg = {
                'p@': param_ids,
                'event': 'MOD-CRTN',
                'id': str(self.id),
                'name': str(self.__class__.__name__),
                'user': str(USER.id),
                'error': 'MIE',
                'memory_init': (psutil.virtual_memory()[2])*(.000001),
                'cpu_init': (psutil.cpu_percent()),
                'duration_init': str(datetime.utcnow()-start_time),
                'time': str(datetime.utcnow()),
                'cpu_run': 0,
                'memory_run': 0,
                'duration_run': "00:00:00.000000",
                'label': 'module'
            }

            #print msg
            err.error(jsonify(msg))

            elasticmodule(msg['id'], msg['time'], msg['name'], msg['user'], msg['memory_run'], msg['memory_init'],
                          msg['cpu_run'], msg['cpu_init'], msg['duration_run'], msg['duration_init'], msg['error'])

            ''' GDB part '''
            props = ['NAME:\"' + msg['name'] + '\"', 'id:\"' + msg['id'] + '\"',
                     'user:\"' + msg['user'] + '\"', 'error:\"' + msg['error'] + '\"', 'time:\"' + str(msg['time']) + '\"',
                     'memory_init:\"' + str(msg['memory_init']) + '\"', 'cpu_init:\"' + str(msg['cpu_init']) + '\"',
                     'duration_init:\"' + str(msg['duration_init']) + '\"',
                     'cpu_run:\"' + str(msg['cpu_run']) + '\"', 'memory_run:\"' + str(msg['memory_run']) + '\"', 'duration_run:\"' + str(msg['duration_run']) + '\"',
                     'label:\"' + str(msg['label']) + '\"']
            propsQuery = ','.join(props)
            #print propsQuery
            query = 'create (n:Module{' + propsQuery + '})'
            graph.run(query)
Example #60
-1
def main():
    try:
        # options.filename, options.interface and get_network_bytes() are
        # defined elsewhere in the original script
        cpus = psutil.cpu_percent(interval=0, percpu=True)
        header = []
        cpu_sum = []
        for i in range(0, len(cpus)):
            header.append('Core-' + str(i) + '_usage_percent')

        f = open(options.filename, 'w')
        f.write('Total_CPU_usage_percent,Memory_usage_percent,Throughput_in_Mbps,Throughput_out_Mbps,Total_CPU_usage_iowait_percent' + ',' + ",".join(header) + "," + "Read_speed_MBps" + "," + "Write_speed_MBps" + "," + "Avg_CPU_usage" + '\n')
        interval = 1
        bytes_received_prev, bytes_sent_prev = get_network_bytes(options.interface)
        bytes_read_prev = psutil.disk_io_counters(perdisk=False).read_bytes
        bytes_written_prev = psutil.disk_io_counters(perdisk=False).write_bytes
        prev_time = time.time()
        time.sleep(interval)

        while True:
            cpu = str(psutil.cpu_percent(interval=0))
            mem = str(psutil.virtual_memory().percent)
            cpus = psutil.cpu_percent(interval=0, percpu=True)
            cpu_iowait = str(psutil.cpu_times_percent(interval=0, percpu=False).iowait)
            bytes_received_curr, bytes_sent_curr = get_network_bytes(options.interface)
            bytes_read_curr = psutil.disk_io_counters(perdisk=False).read_bytes
            bytes_written_curr = psutil.disk_io_counters(perdisk=False).write_bytes

            # calculate elapsed time
            curr_time = time.time()
            elapsed_time = curr_time - prev_time
            prev_time = curr_time

            # calculate network throughput in Mbps
            throughput_in = (((bytes_received_curr - bytes_received_prev) * 8.0) / elapsed_time) / 1000000.0
            throughput_out = (((bytes_sent_curr - bytes_sent_prev) * 8.0) / elapsed_time) / 1000000.0

            # calculate disk throughput in MBps
            disk_io_read = (float(bytes_read_curr - bytes_read_prev) / (1024.0 * 1024.0)) / elapsed_time
            disk_io_write = (float(bytes_written_curr - bytes_written_prev) / (1024.0 * 1024.0)) / elapsed_time

            # update variables
            bytes_received_prev = bytes_received_curr
            bytes_sent_prev = bytes_sent_curr
            bytes_read_prev = bytes_read_curr
            bytes_written_prev = bytes_written_curr

            # round throughputs to 5 decimals
            incoming = str(round(throughput_in, 5))
            outgoing = str(round(throughput_out, 5))
            read = str(round(disk_io_read, 5))
            write = str(round(disk_io_write, 5))
            cpu_sum.append(float(cpu))
            cpu_avg = str(round(sum(cpu_sum) / float(len(cpu_sum)), 5))

            # append record to csv file
            f.write(cpu + ',' + mem + ',' + incoming + ',' + outgoing + ',' + cpu_iowait + ',' + ",".join(map(str, cpus)) + "," + read + "," + write + "," + cpu_avg + '\n')
            f.flush()
            time.sleep(interval)
    except (KeyboardInterrupt, SystemExit):
        print("Exiting.")
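The logger above builds each CSV row by hand-concatenating strings; the standard-library csv module expresses the same row-writing idea with less room for stray commas. A small sketch under a reduced column set (file name and columns are illustrative):

import csv
import time

import psutil

with open("stats.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["cpu_percent", "memory_percent", "iowait_percent"])
    psutil.cpu_percent(interval=None)                    # prime the counter; first value is 0.0
    while True:
        time.sleep(1)
        times = psutil.cpu_times_percent(interval=None)  # iowait is Linux-only
        writer.writerow([
            psutil.cpu_percent(interval=None),
            psutil.virtual_memory().percent,
            getattr(times, "iowait", 0.0),
        ])
        f.flush()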