def memory_usage(proc=-1,
                 interval=.1,
                 timeout=None,
                 timestamps=False,
                 include_children=False,
                 multiprocess=False,
                 max_usage=False,
                 retval=False,
                 stream=None,
                 backend=None):
    """
    Return the memory usage of a process or piece of code

    Parameters
    ----------
    proc : {int, string, tuple, subprocess.Popen}, optional
        The process to monitor. Can be given by an integer/string
        representing a PID, by a Popen object or by a tuple
        representing a Python function. The tuple contains three
        values (f, args, kw) and specifies to run the function
        f(*args, **kw).
        Set to -1 (default) for current process.

    interval : float, optional
        Interval at which measurements are collected.

    timeout : float, optional
        Maximum amount of time (in seconds) to wait before returning.

    max_usage : bool, optional
        Only return the maximum memory usage (default False)

    retval : bool, optional
        For profiling python functions. Save the return value of the profiled
        function. Return value of memory_usage becomes a tuple:
        (mem_usage, retval)

    timestamps : bool, optional
        if True, timestamps of the memory usage measurements are collected as well.

    include_children : bool, optional
        if True, sum the memory of all forked processes as well

    multiprocess : bool, optional
        if True, track the memory usage of all forked processes.

    stream : File
        if stream is a File opened with write access, then results are written
        to this file instead of stored in memory and returned at the end of
        the subprocess. Useful for long-running processes.
        Implies timestamps=True.

    Returns
    -------
    mem_usage : list of floating-point values
        memory usage, in MiB. Its length is always < timeout / interval.
        If max_usage is given, the maximum memory usage and the number of
        measurements taken are returned instead.
    ret : return value of the profiled function
        Only returned if retval is set to True
    """
    backend = choose_backend(backend)
    if stream is not None:
        timestamps = True

    if not max_usage:
        ret = []
    else:
        ret = -1

    if timeout is not None:
        max_iter = int(timeout / interval)
    elif isinstance(proc, int):
        # external process and no timeout
        max_iter = 1
    else:
        # for a Python function wait until it finishes
        max_iter = float('inf')

    if callable(proc):
        proc = (proc, (), {})
    if isinstance(proc, (list, tuple)):
        if len(proc) == 1:
            f, args, kw = (proc[0], (), {})
        elif len(proc) == 2:
            f, args, kw = (proc[0], proc[1], {})
        elif len(proc) == 3:
            f, args, kw = (proc[0], proc[1], proc[2])
        else:
            raise ValueError

        while True:
            child_conn, parent_conn = Pipe(
            )  # this will store MemTimer's results
            p = MemTimer(os.getpid(),
                         interval,
                         child_conn,
                         backend,
                         timestamps=timestamps,
                         max_usage=max_usage,
                         include_children=include_children)
            p.start()
            parent_conn.recv()  # wait until we start getting memory

            # If the profiled function raises, the spawned monitoring process would
            # otherwise never be told to stop and the whole program would hang
            # indefinitely, so make sure it gets killed here.
            try:
                returned = f(*args, **kw)
                parent_conn.send(0)  # finish timing
                ret = parent_conn.recv()
                n_measurements = parent_conn.recv()
                if retval:
                    ret = ret, returned
            except Exception:
                parent = psutil.Process(os.getpid())
                for child in parent.children(recursive=True):
                    os.kill(child.pid, SIGKILL)
                p.join(0)
                raise

            p.join(5 * interval)
            if n_measurements > 4 or interval < 1e-6:
                break
            interval /= 10.
    elif isinstance(proc, subprocess.Popen):
        # external process, launched from Python
        line_count = 0
        while True:
            if not max_usage:
                mem_usage = _get_memory(proc.pid,
                                        backend,
                                        timestamps=timestamps,
                                        include_children=include_children)

                if stream is not None:
                    stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))

                    # Write children to the stream file
                    if multiprocess:
                        for idx, chldmem in enumerate(
                                _get_child_memory(proc.pid)):
                            stream.write("CHLD {0} {1:.6f} {2:.4f}\n".format(
                                idx, chldmem, time.time()))
                else:
                    # Create a nested list with the child memory
                    if multiprocess:
                        mem_usage = [mem_usage]
                        for chldmem in _get_child_memory(proc.pid):
                            mem_usage.append(chldmem)

                    # Append the memory usage to the return value
                    ret.append(mem_usage)
            else:
                ret = max(
                    ret,
                    _get_memory(proc.pid,
                                backend,
                                include_children=include_children))
            time.sleep(interval)
            line_count += 1
            # flush every 50 lines. Make 'tail -f' usable on profile file
            if line_count > 50:
                line_count = 0
                if stream is not None:
                    stream.flush()
            if timeout is not None:
                max_iter -= 1
                if max_iter == 0:
                    break
            if proc.poll() is not None:
                break
    else:
        # external process
        if max_iter == -1:
            max_iter = 1
        counter = 0
        while counter < max_iter:
            counter += 1
            if not max_usage:
                mem_usage = _get_memory(proc,
                                        backend,
                                        timestamps=timestamps,
                                        include_children=include_children)
                if stream is not None:
                    stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))

                    # Write children to the stream file
                    if multiprocess:
                        for idx, chldmem in enumerate(
                                _get_child_memory(proc.pid)):
                            stream.write("CHLD {0} {1:.6f} {2:.4f}\n".format(
                                idx, chldmem, time.time()))
                else:
                    # Create a nested list with the child memory
                    if multiprocess:
                        mem_usage = [mem_usage]
                        for chldmem in _get_child_memory(proc.pid):
                            mem_usage.append(chldmem)

                    # Append the memory usage to the return value
                    ret.append(mem_usage)
            else:
                ret = max([
                    ret,
                    _get_memory(proc,
                                backend,
                                include_children=include_children)
                ])

            time.sleep(interval)
            # Flush every 50 lines.
            if counter % 50 == 0 and stream is not None:
                stream.flush()
    if stream:
        return None
    return ret
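
A minimal usage sketch of the memory_usage helper defined above, following its documented API (the allocate workload, the 0.05 s interval and the 10_000_000 element count are purely illustrative): pass an (f, args, kwargs) tuple to profile a Python function, and with retval=True the function's own result comes back alongside the samples.

def allocate(n):
    # illustrative workload: build a list large enough to show up in the samples
    return [0] * n

mem, result = memory_usage((allocate, (10_000_000,), {}), interval=0.05, retval=True)
print("peak MiB:", max(mem), "samples:", len(mem))
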
Example 2
def process_memory_percentage():
    """Return current process's memory utilization as a percentage; process memory / total 'physical' memory * 100."""

    # use RSS as basis for memory metric
    return psutil.Process().memory_percent(memtype="rss")
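
A small cross-check sketch for the helper above (restating its docstring, which defines the value as process RSS / total physical memory * 100), computed by hand from psutil primitives:

proc = psutil.Process()
manual_percent = proc.memory_info().rss / psutil.virtual_memory().total * 100
print(process_memory_percentage(), manual_percent)  # the two figures should agree closely
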
Example 3
def test_execute_dont_kill_children():
    pid = execute.python()(create_sleeper_subprocess)()
    subprocess = psutil.Process(pid)
    assert subprocess.status() == 'sleeping'
    subprocess.terminate()  # cleanup
Example 4
def getfindings(scan_id):
    res = {"page": "getfindings", "scan_id": scan_id}
    if scan_id not in this.scans.keys():
        res.update({
            "status": "error",
            "reason": "scan_id '{}' not found".format(scan_id)
        })
        return jsonify(res)

    proc = this.scans[scan_id]["proc"]

    # check if the scan is finished
    status()
    if hasattr(proc, 'pid') and psutil.pid_exists(proc.pid) and psutil.Process(
            proc.pid).status() in ["sleeping", "running"]:
        # print "scan not finished"
        res.update({"status": "error", "reason": "Scan in progress"})
        return jsonify(res)

    # check if the report is available (exists && scan finished)
    report_filename = BASE_DIR + "/results/nmap_{}.xml".format(scan_id)
    if not os.path.exists(report_filename):
        res.update({"status": "error", "reason": "Report file not available"})
        return jsonify(res)

    issues = _parse_report(report_filename, scan_id)
    scan = {"scan_id": scan_id}
    summary = {
        "nb_issues": len(issues),
        "nb_info": len(issues),
        "nb_low": 0,
        "nb_medium": 0,
        "nb_high": 0,
        "engine_name": "nmap",
        "engine_version": this.scanner['version']
    }

    # Store the findings in a file
    with open(BASE_DIR + "/results/nmap_" + scan_id + ".json",
              'w') as report_file:
        json.dump({
            "scan": scan,
            "summary": summary,
            "issues": issues
        },
                  report_file,
                  default=_json_serial)

    # Delete the tmp hosts file (used with -iL argument upon launching nmap)
    hosts_filename = BASE_DIR + "/tmp/engine_nmap_hosts_file_scan_id_{}.tmp".format(
        scan_id)
    if os.path.exists(hosts_filename):
        os.remove(hosts_filename)

    res.update({
        "scan": scan,
        "summary": summary,
        "issues": issues,
        "status": "success"
    })
    return jsonify(res)
        command = "ip -6 route add %s encap seg6 mode %s segs %s dev %s; " %(args.prefix+str(i), args.encapmode, args.segments, args.device)
      elif 'del' in args:    
        command = "ip -6 route del %s dev %s; " %(args.prefix+str(i), args.device)
      elif 'changesr' in args:
        command = "ip -6 r change %s encap seg6 mode encap segs %s dev %s; " %(args.prefix+str(i), args.egments, args.device)
      client.run_command(command)
    client.terminate()
  else:
    print(bcolors.FAIL + "mode should be: b(bunch), s(sequential), si (sequentialInOneConnection)"+ bcolors.ENDC)

if __name__ == '__main__':

  functionName = sys.argv[1]
  args = get_args(functionName)

  p=psutil.Process(os.getpid())
  p.cpu_percent()
  psutil.cpu_percent()
  start_time = time.time()
  run(args)
  executionTime=time.time() - start_time
  appCPUUsage = p.cpu_percent()*executionTime
  SystemCPUUsage = psutil.cpu_percent()*executionTime
  memoryUsage = p.memory_info()[0]/2.**10

  print(bcolors.OKBLUE + "Execution time: " + str(executionTime) + bcolors.ENDC)
  print(bcolors.OKBLUE + "App CPU Usage: " + str(appCPUUsage) + bcolors.ENDC)
  print(bcolors.OKBLUE + "Total CPU Usage: " + str(SystemCPUUsage) + bcolors.ENDC)
  print(bcolors.OKBLUE + "Memory Usage (KB): " + str(memoryUsage) + bcolors.ENDC)

  f1 = open(args.fileName +'.txt','a+')
Example 6
    bot.config = json.load(data)


#initialize db connection
bot.db = sqlite3.connect('main.db')
#create tables for muted members and access roles. Necessary for basic functionality
bot.db.execute('CREATE TABLE IF NOT EXISTS mutes (id integer NOT NULL primary key AUTOINCREMENT, member_id varchar, member_name varchar, mute_time integer, server_id varchar)')
bot.db.execute('CREATE TABLE IF NOT EXISTS roles (id integer NOT NULL primary key AUTOINCREMENT, role_id varchar, role varchar, level int, serverid int)')
bot.db.commit()


#global storages
bot.access_roles = {} #roles
bot.unmute_timers = {} #mutes
bot.server_settings = {} #per server settings
bot.process = psutil.Process()

@bot.event
async def on_ready():
    bot.remove_command('help')
    bot.start_time = datetime.utcnow()

    print("\n{} has started!\n".format(bot.user.name))
    print("Current time is {}".format(bot.start_time))

    cursor = bot.db.cursor()
    
    for server in bot.guilds:
        bot.access_roles.update({server.id : {}}) #add server to access_role storage
        bot.unmute_timers.update({server.id: {}}) #add server to unmute_timers storage
        bot.server_settings.update({server.id : {'wiki_lang':'eng'}})
Example 7
File: pool.py Project: djwisdom/awx
 def mb(self):
     if self.alive:
         return '{:0.3f}'.format(psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0)
     return '0'
def print_mem():
    process = psutil.Process(os.getpid())
    mem = process.memory_info().rss / 1000
    print("Used this much memory: " + str(mem))
Example 9
# --- paths

ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
SCRIPTS_DIR = os.path.join(ROOT_DIR, 'scripts')
HERE = os.path.abspath(os.path.dirname(__file__))

# --- support

HAS_CPU_AFFINITY = hasattr(psutil.Process, "cpu_affinity")
HAS_CPU_FREQ = hasattr(psutil, "cpu_freq")
HAS_CONNECTIONS_UNIX = POSIX and not SUNOS
HAS_ENVIRON = hasattr(psutil.Process, "environ")
HAS_PROC_IO_COUNTERS = hasattr(psutil.Process, "io_counters")
HAS_IONICE = hasattr(psutil.Process, "ionice")
HAS_MEMORY_FULL_INFO = 'uss' in psutil.Process().memory_full_info()._fields
HAS_MEMORY_MAPS = hasattr(psutil.Process, "memory_maps")
HAS_PROC_CPU_NUM = hasattr(psutil.Process, "cpu_num")
HAS_RLIMIT = hasattr(psutil.Process, "rlimit")
HAS_THREADS = hasattr(psutil.Process, "threads")
HAS_SENSORS_BATTERY = hasattr(psutil, "sensors_battery")
HAS_BATTERY = HAS_SENSORS_BATTERY and bool(psutil.sensors_battery())
HAS_SENSORS_FANS = hasattr(psutil, "sensors_fans")
HAS_SENSORS_TEMPERATURES = hasattr(psutil, "sensors_temperatures")

# --- misc


def _get_py_exe():
    def attempt(exe):
        try:
Example 10
#    plt.plot(xi,yi,color=cloo[jj],marker='+')
#    plt.plot(xj,yj,color=cloo[kk],marker='+')
#    
    plt.show()
    
    
    
print(h,len(h))
#h1=h
#h2=np.array(h1)
#print(np.argsort(h2))
#print(h2)
#h3=sorted(h2)
#h3=np.array(h3)
#print(h3)



elapsed = (time.clock() - start)
print("Time used:",elapsed)

#print(sys.getsizeof(xi))
info = psutil.virtual_memory()
print('memory used:', psutil.Process(os.getpid()).memory_info().rss)
#print("total memory", info.total)
#print('memory percent', info.percent)
#print('cpu count', psutil.cpu_count())



Example 11
def show_memory_info(hint):
    pid = os.getpid()
    p = psutil.Process(pid)
    info = p.memory_info()
    memory = info.rss / 1024. / 1024.
    print("{} memory used: {}".format(hint, memory))
Example 12
# labels = np.tile(groups, N // K)
data = np.random.randn(N)

data = np.random.randn(N)

Ks = [100, 1000, 5000, 10000, 25000, 50000, 100000]

# Ks = [500000, 1000000, 2500000, 5000000, 10000000]

import psutil
import os
import gc

pid = os.getpid()
proc = psutil.Process(pid)


def dict_unique(values, expected_K, sort=False, memory=False):
    if memory:
        gc.collect()
        before_mem = proc.memory_info().rss  # psutil's old get_memory_info() was renamed to memory_info()

    rizer = lib.DictFactorizer()
    result = rizer.unique_int64(values)

    if memory:
        result = proc.memory_info().rss - before_mem
        return result

    if sort:
Example 13
def cpuStats():
    pid = os.getpid()
    py = psutil.Process(pid)
    memoryUse = py.memory_info()[0] / 2.**30
    print('memory GB:', memoryUse)
Example 14
 def process_memory_usage_psutil() -> float:
     process = psutil.Process(os.getpid())
     return to_mb(int(process.memory_info().rss))
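
to_mb is a helper defined elsewhere in the project this snippet was taken from. A minimal stand-in, assuming it simply converts a byte count to mebibytes (hypothetical definition, shown only so the example runs on its own):

def to_mb(num_bytes: int) -> float:
    # assumed conversion: bytes -> MiB (1024 * 1024 bytes)
    return num_bytes / (1024 * 1024)
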
Example 15
    def check_memory(self, driver):
        try:
            import psutil
        except ImportError:
            logger.error('no psutil module')
            return

        mem_limit = {}
        idle_since = time.time()

        while True:
            with self.lock:
                tids_to_pop = []
                for tid, (task, proc) in self.tasks.iteritems():
                    task_id = task.task_id
                    try:
                        pid = proc.pid
                        p = psutil.Process(pid)
                        rss = p.memory_info().rss >> 20
                    except Exception as e:
                        logger.error(
                            'worker process %d of task %s is dead: %s', pid,
                            tid, e)
                        reply_status(driver, task_id, 'TASK_LOST')
                        tids_to_pop.append(tid)
                        continue

                    if p.status(
                    ) == psutil.STATUS_ZOMBIE or not p.is_running():
                        reply_status(driver, task_id, 'TASK_LOST')
                        proc.join(CLEAN_ZOMBIE_TIME_OUT)
                        try:
                            os.waitpid(proc.pid, os.WNOHANG)
                        except OSError as e:
                            if e.errno == errno.ECHILD:
                                tids_to_pop.append(tid)
                            else:
                                logger.exception('process termination fail: ',
                                                 e.message)
                        else:
                            logger.error(
                                'worker process %d of task %s is zombie', pid,
                                tid)
                        continue

                    offered = get_task_memory(task)
                    if not offered:
                        continue
                    if rss > offered * 1.5:
                        logger.warning(
                            'task %s used too much memory: %dMB > %dMB * 1.5, '
                            'kill it. '
                            'use -M argument or taskMemory '
                            'to request more memory.', tid, rss, offered)

                        reply_status(driver, task_id, 'TASK_KILLED')
                        tids_to_pop.append(tid)
                        terminate(tid, proc)
                    elif rss > offered * mem_limit.get(tid, 1.0):
                        logger.debug(
                            'task %s used too much memory: %dMB > %dMB, '
                            'use -M to request or taskMemory for more memory',
                            tid, rss, offered)
                        mem_limit[tid] = rss / offered + 0.1
                for tid in tids_to_pop:
                    self.tasks.pop(tid)
                now = time.time()
                if self.tasks:
                    idle_since = now
                elif idle_since + MAX_EXECUTOR_IDLE_TIME < now:
                    os._exit(0)

            time.sleep(1)
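
The per-task memory policy inside this loop can be distilled into a small pure function. A sketch with illustrative names (not part of the dpark API): kill once RSS exceeds 1.5x the offered memory, otherwise only bump the warning threshold slightly above the current usage.

def memory_action(rss_mb, offered_mb, warn_factor=1.0):
    # mirrors the checks above: hard kill at 1.5x the offer, soft warning at the current factor
    if rss_mb > offered_mb * 1.5:
        return "kill", warn_factor
    if rss_mb > offered_mb * warn_factor:
        return "warn", rss_mb / offered_mb + 0.1
    return "ok", warn_factor
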
Example 16
def reap_children(recursive=False):
    """Terminate and wait() any subprocess started by this test suite
    and ensure that no zombies stick around to hog resources and
    create problems when looking for refleaks.

    If recursive is True it also tries to terminate and wait()
    all grandchildren started by this process.
    """

    # This is here to make sure wait_procs() behaves properly and
    # investigate:
    # https://ci.appveyor.com/project/giampaolo/psutil/build/job/
    #     jiq2cgd6stsbtn60
    def assert_gone(pid):
        assert not psutil.pid_exists(pid), pid
        assert pid not in psutil.pids(), pid
        try:
            p = psutil.Process(pid)
            assert not p.is_running(), pid
        except psutil.NoSuchProcess:
            pass
        else:
            assert 0, "pid %s is not gone" % pid

    # Get the children here, before terminating the children sub
    # processes as we don't want to lose the intermediate reference
    # in case of grandchildren.
    if recursive:
        children = set(psutil.Process().children(recursive=True))
    else:
        children = set()

    # Terminate subprocess.Popen instances "cleanly" by closing their
    # fds and wait()ing for them in order to avoid zombies.
    while _subprocesses_started:
        subp = _subprocesses_started.pop()
        _pids_started.add(subp.pid)
        try:
            subp.terminate()
        except OSError as err:
            if WINDOWS and err.errno == 6:  # "invalid handle"
                pass
            elif err.errno != errno.ESRCH:
                raise
        if subp.stdout:
            subp.stdout.close()
        if subp.stderr:
            subp.stderr.close()
        try:
            # Flushing a BufferedWriter may raise an error.
            if subp.stdin:
                subp.stdin.close()
        finally:
            # Wait for the process to terminate, to avoid zombies.
            try:
                subp.wait()
            except OSError as err:
                if err.errno != errno.ECHILD:
                    raise

    # Terminate started pids.
    while _pids_started:
        pid = _pids_started.pop()
        try:
            p = psutil.Process(pid)
        except psutil.NoSuchProcess:
            assert_gone(pid)
        else:
            children.add(p)

    # Terminate children.
    if children:
        for p in children:
            try:
                p.terminate()
            except psutil.NoSuchProcess:
                pass
        gone, alive = psutil.wait_procs(children, timeout=GLOBAL_TIMEOUT)
        for p in alive:
            warn("couldn't terminate process %r; attempting kill()" % p)
            try:
                p.kill()
            except psutil.NoSuchProcess:
                pass
        gone, alive = psutil.wait_procs(alive, timeout=GLOBAL_TIMEOUT)
        if alive:
            for p in alive:
                warn("process %r survived kill()" % p)

        for p in children:
            assert_gone(p.pid)
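
The terminate-then-kill flow used by reap_children can be reduced to a small helper. A sketch for the common case of one process plus its children (the helper name and the 3-second grace period are illustrative, not part of the test suite):

import psutil

def terminate_tree(proc, timeout=3):
    # ask the whole tree to terminate, then kill whatever survives the grace period
    procs = proc.children(recursive=True) + [proc]
    for p in procs:
        try:
            p.terminate()
        except psutil.NoSuchProcess:
            pass
    gone, alive = psutil.wait_procs(procs, timeout=timeout)
    for p in alive:
        p.kill()
    return gone, alive
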
Example 17
    def init_Varialbes_Serial(self):
        global s

        #self.s = serial.Serial()
        #self.s.port = '/dev/tty.SLAB_USBtoUART'
        #self.s.baudrate = 9600
        #self.s.bytesize = 7
        #self.s.parity = serial.PARITY_EVEN
        #self.s.stopbits = 1
        #self.s.timeout = 0.1

        p = psutil.Process(os.getpid())
        files = p.open_files()
        files.clear()

        #if self.s.is_open == False:
        #   self.s.open()

        s = self.s

        #print("puerto abierto",s.is_open)

        #print("datos !!!!!!")
        #print(files)
        # PLC oven registers, in order:
        # - 0. sampling time
        # - 1. proportional gain
        # - 2. integral gain
        # - 3. derivative gain
        # - 4. control direction
        # - 5. error tolerance range
        # - 6. output upper limit
        # - 7. output lower limit
        # - 8. integral upper limit
        # - 9. integral lower limit
        # - 10. accumulated integral value
        # - 11. previous PV
        # - 12. present value
        # - 13. set value
        # - 14. GPWM

        # Modbus ASCII read frame format: ':0103119A000150\r\n'
        # 01   ---> slave address
        # 03   ---> read register (06 ---> write register); 119A ---> register address in hex (119A = 4506)
        # 0001 ---> number of registers to read; in this case a single register.
        # 50   ---> checksum: (FF - sum of all the byte pairs 01+03+11+9A+00+01) + 1

        pidH1 = 4506
        pidH2 = 4536
        pidH3 = 4556
        pidH4 = 4576

        self.vectorRegistrosHorno1 = [
            pidH1, pidH1 + 1, pidH1 + 2, pidH1 + 3, pidH1 + 4, pidH1 + 5,
            pidH1 + 6, pidH1 + 7, pidH1 + 8, pidH1 + 9, pidH1 + 10, pidH1 + 12,
            4124, 4125, 4526
        ]
        self.vectorRegistrosHorno2 = [
            pidH2, pidH2 + 1, pidH2 + 2, pidH2 + 3, pidH2 + 4, pidH2 + 5,
            pidH2 + 6, pidH2 + 7, pidH2 + 8, pidH2 + 9, pidH2 + 10, pidH2 + 12,
            4134, 4135, 4529
        ]
        self.vectorRegistrosHorno3 = [
            pidH3, pidH3 + 1, pidH3 + 2, pidH3 + 3, pidH3 + 4, pidH3 + 5,
            pidH3 + 6, pidH3 + 7, pidH3 + 8, pidH3 + 9, pidH3 + 10, pidH3 + 12,
            4144, 4145, 4532
        ]
        self.vectorRegistrosHorno4 = [
            pidH4, pidH4 + 1, pidH4 + 2, pidH4 + 3, pidH4 + 4, pidH4 + 5,
            pidH4 + 6, pidH4 + 7, pidH4 + 8, pidH4 + 9, pidH4 + 10, pidH4 + 12,
            4154, 4155, 4596
        ]

        # Ramp values, in decimal: 4129, 4139, 4149, 4159
        self.registrosRampasHornos_Hex = ['1021', '102B', '1035', '103F']

        # Mass flow controller (MFC) registers

        # Set values start at register 4098 (dec) = 1002 (hex) and end at 4101
        # Present values start at register 4197 (dec) = 1065 (hex) and end at 4200

        self.registrosMFC1_SV_PV = ['1002', '1065']
        self.registrosMFC2_SV_PV = ['1003', '1066']
        self.registrosMFC3_SV_PV = ['1004', '1067']
        self.registrosMFC4_SV_PV = ['1005', '1068']
        self.registrosMFC5_SV_PV = ['1006', '1069']
        self.registrosMFC6_SV_PV = ['1007', '106A']

        # Scaling starts at register 4606 (dec) = 11FE (hex) and ends at 4621 (dec) = 120D (hex)
        # In: Xmax, Xmin, Ymax, Ymin
        self.registrosMFC1_IN = ['11FE', '11FF', '1200', '1201']
        self.registrosMFC2_IN = ['1202', '1203', '1204', '1205']
        self.registrosMFC3_IN = ['1206', '1207', '1208', '1209']
        self.registrosMFC4_IN = ['120A', '120B', '120C', '120D']
        self.registrosMFC5_IN = ['120E', '120F', '1210', '1211']
        self.registrosMFC6_IN = ['1212', '1213', '1214', '1215']

        # Scaling starts at register 4631 (dec) = 1217 (hex) and ends at 4646 (dec) = 1226 (hex)
        # Out: Xmax, Xmin, Ymax, Ymin
        self.registrosMFC1_OUT = ['1217', '1218', '1219', '121A']
        self.registrosMFC2_OUT = ['121B', '121C', '121D', '121E']
        self.registrosMFC3_OUT = ['121F', '1220', '1221', '1222']
        self.registrosMFC4_OUT = ['1223', '1224', '1225', '1226']
        self.registrosMFC5_OUT = ['1227', '1228', '1229', '122A']
        self.registrosMFC6_OUT = ['122B', '122C', '122D', '122E']

        self.vectorRegistrosHorno1_Hex = [
            hex(self.vectorRegistrosHorno1[0]),
            hex(self.vectorRegistrosHorno1[1]),
            hex(self.vectorRegistrosHorno1[2]),
            hex(self.vectorRegistrosHorno1[3]),
            hex(self.vectorRegistrosHorno1[4]),
            hex(self.vectorRegistrosHorno1[5]),
            hex(self.vectorRegistrosHorno1[6]),
            hex(self.vectorRegistrosHorno1[7]),
            hex(self.vectorRegistrosHorno1[8]),
            hex(self.vectorRegistrosHorno1[9]),
            hex(self.vectorRegistrosHorno1[10]),
            hex(self.vectorRegistrosHorno1[11]),
            hex(self.vectorRegistrosHorno1[12]),
            hex(self.vectorRegistrosHorno1[13]),
            hex(self.vectorRegistrosHorno1[14])
        ]

        self.vectorRegistrosHorno2_Hex = [
            hex(self.vectorRegistrosHorno2[0]),
            hex(self.vectorRegistrosHorno2[1]),
            hex(self.vectorRegistrosHorno2[2]),
            hex(self.vectorRegistrosHorno2[3]),
            hex(self.vectorRegistrosHorno2[4]),
            hex(self.vectorRegistrosHorno2[5]),
            hex(self.vectorRegistrosHorno2[6]),
            hex(self.vectorRegistrosHorno2[7]),
            hex(self.vectorRegistrosHorno2[8]),
            hex(self.vectorRegistrosHorno2[9]),
            hex(self.vectorRegistrosHorno2[10]),
            hex(self.vectorRegistrosHorno2[11]),
            hex(self.vectorRegistrosHorno2[12]),
            hex(self.vectorRegistrosHorno2[13]),
            hex(self.vectorRegistrosHorno2[14])
        ]

        self.vectorRegistrosHorno3_Hex = [
            hex(self.vectorRegistrosHorno3[0]),
            hex(self.vectorRegistrosHorno3[1]),
            hex(self.vectorRegistrosHorno3[2]),
            hex(self.vectorRegistrosHorno3[3]),
            hex(self.vectorRegistrosHorno3[4]),
            hex(self.vectorRegistrosHorno3[5]),
            hex(self.vectorRegistrosHorno3[6]),
            hex(self.vectorRegistrosHorno3[7]),
            hex(self.vectorRegistrosHorno3[8]),
            hex(self.vectorRegistrosHorno3[9]),
            hex(self.vectorRegistrosHorno3[10]),
            hex(self.vectorRegistrosHorno3[11]),
            hex(self.vectorRegistrosHorno3[12]),
            hex(self.vectorRegistrosHorno3[13]),
            hex(self.vectorRegistrosHorno3[14])
        ]

        self.vectorRegistrosHorno4_Hex = [
            hex(self.vectorRegistrosHorno4[0]),
            hex(self.vectorRegistrosHorno4[1]),
            hex(self.vectorRegistrosHorno4[2]),
            hex(self.vectorRegistrosHorno4[3]),
            hex(self.vectorRegistrosHorno4[4]),
            hex(self.vectorRegistrosHorno4[5]),
            hex(self.vectorRegistrosHorno4[6]),
            hex(self.vectorRegistrosHorno4[7]),
            hex(self.vectorRegistrosHorno4[8]),
            hex(self.vectorRegistrosHorno4[9]),
            hex(self.vectorRegistrosHorno4[10]),
            hex(self.vectorRegistrosHorno4[11]),
            hex(self.vectorRegistrosHorno4[12]),
            hex(self.vectorRegistrosHorno4[13]),
            hex(self.vectorRegistrosHorno4[14])
        ]

        self.registrosPIDHornosLectura = []
        self.registrosHorno = []
        self.registros_SetPresent_Value_Hornos = []
        self.registros_SetPresent_Value_Hornos_rampa = []
        self.registrosEscalado_IN = []
        self.registrosEscalado_OUT = []
        self.startBit = ':'
        self.prefijo_lectura = '0103'  # 01: address, 03: read operation (06 is for writing)
        self.stopbits = '\r\n'  # stop bits

        self.start_Horno1_PIDWindow = False
        self.start_Horno2_PIDWindow = False
        self.start_Horno3_PIDWindow = False
        self.start_Horno4_PIDWindow = False

        self.start_Horno1_Reactor = False
        self.start_Horno2_Reactor = False
        self.start_Horno3_Reactor = False
        self.start_Horno4_Reactor = False

        self.startValveReactor_flag = False
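
The frame-format comment in this example describes the checksum of a Modbus ASCII frame (a longitudinal redundancy check, LRC). A standalone sketch of that calculation (the helper name is hypothetical, not part of the class above); it reproduces the '50' at the end of the documented read frame ':0103119A000150':

def modbus_ascii_lrc(frame_hex: str) -> str:
    # LRC = two's complement of the byte sum, i.e. (0xFF - sum of the byte pairs) + 1
    data = bytes.fromhex(frame_hex)
    return "{:02X}".format((-sum(data)) & 0xFF)

assert modbus_ascii_lrc("0103119A0001") == "50"  # slave 01, read (03), register 119A, count 0001
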
Example 18
    async def info(self, ctx, *args: str):
        """Summary of cpu, memory, disk and network information
         Usage: info [option]
         Examples:
             sysinfo           Shows all available info
             sysinfo cpu       Shows CPU usage
             sysinfo memory    Shows memory usage
             sysinfo file      Shows full path of open files
             sysinfo disk      Shows disk usage
             sysinfo network   Shows network usage
             sysinfo boot      Shows boot time
         """

        options = ('cpu', 'memory', 'file', 'disk', 'network', 'boot')

        # CPU
        cpu_count_p = psutil.cpu_count(logical=False)
        cpu_count_l = psutil.cpu_count()
        if cpu_count_p is None:
            cpu_count_p = "N/A"
        cpu_cs = ("CPU Count"
                  "\n\t{0:<9}: {1:>3}".format("Physical", cpu_count_p) +
                  "\n\t{0:<9}: {1:>3}".format("Logical", cpu_count_l))
        psutil.cpu_percent(interval=None, percpu=True)
        await asyncio.sleep(1)
        cpu_p = psutil.cpu_percent(interval=None, percpu=True)
        cpu_ps = ("CPU Usage"
                  "\n\t{0:<8}: {1}".format("Per CPU", cpu_p) +
                  "\n\t{0:<8}: {1:.1f}%".format("Overall",
                                                sum(cpu_p) / len(cpu_p)))
        cpu_t = psutil.cpu_times()
        width = max([
            len("{:,}".format(int(n)))
            for n in [cpu_t.user, cpu_t.system, cpu_t.idle]
        ])
        cpu_ts = ("CPU Times"
                  "\n\t{0:<7}: {1:>{width},}".format(
                      "User", int(cpu_t.user), width=width) +
                  "\n\t{0:<7}: {1:>{width},}".format(
                      "System", int(cpu_t.system), width=width) +
                  "\n\t{0:<7}: {1:>{width},}".format(
                      "Idle", int(cpu_t.idle), width=width))

        # Memory
        mem_v = psutil.virtual_memory()
        width = max([
            len(self._size(n)) for n in
            [mem_v.total, mem_v.available, (mem_v.total - mem_v.available)]
        ])
        mem_vs = ("Virtual Memory"
                  "\n\t{0:<10}: {1:>{width}}".format(
                      "Total", self._size(mem_v.total), width=width) +
                  "\n\t{0:<10}: {1:>{width}}".format(
                      "Available", self._size(mem_v.available), width=width) +
                  "\n\t{0:<10}: {1:>{width}} {2}%".format(
                      "Used",
                      self._size(mem_v.total - mem_v.available),
                      mem_v.percent,
                      width=width))
        mem_s = psutil.swap_memory()
        width = max([
            len(self._size(n))
            for n in [mem_s.total, mem_s.free, (mem_s.total - mem_s.free)]
        ])
        mem_ss = ("Swap Memory"
                  "\n\t{0:<6}: {1:>{width}}".format(
                      "Total", self._size(mem_s.total), width=width) +
                  "\n\t{0:<6}: {1:>{width}}".format(
                      "Free", self._size(mem_s.free), width=width) +
                  "\n\t{0:<6}: {1:>{width}} {2}%".format(
                      "Used",
                      self._size(mem_s.total - mem_s.free),
                      mem_s.percent,
                      width=width))

        # Open files
        open_f = psutil.Process().open_files()
        open_fs = "Open File Handles\n\t"
        if open_f:
            if hasattr(open_f[0], "mode"):
                open_fs += "\n\t".join(
                    ["{0} [{1}]".format(f.path, f.mode) for f in open_f])
            else:
                open_fs += "\n\t".join(["{0}".format(f.path) for f in open_f])
        else:
            open_fs += "None"

        # Disk usage
        disk_u = psutil.disk_usage(os.path.sep)
        width = max([
            len(self._size(n))
            for n in [disk_u.total, disk_u.free, disk_u.used]
        ])
        disk_us = (
            "Disk Usage"
            "\n\t{0:<6}: {1:>{width}}".format(
                "Total", self._size(disk_u.total), width=width) +
            "\n\t{0:<6}: {1:>{width}}".format(
                "Free", self._size(disk_u.free), width=width) +
            "\n\t{0:<6}: {1:>{width}} {2}%".format(
                "Used", self._size(disk_u.used), disk_u.percent, width=width))

        # Network
        net_io = psutil.net_io_counters()
        width = max([
            len(self._size(n)) for n in [net_io.bytes_sent, net_io.bytes_recv]
        ])
        net_ios = (
            "Network"
            "\n\t{0:<11}: {1:>{width}}".format(
                "Bytes sent", self._size(net_io.bytes_sent), width=width) +
            "\n\t{0:<11}: {1:>{width}}".format(
                "Bytes recv", self._size(net_io.bytes_recv), width=width))

        # Boot time
        boot_s = ("Boot Time"
                  "\n\t{0}".format(
                      datetime.datetime.fromtimestamp(
                          psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")))

        # Output
        msg = ""
        if not args or args[0].lower() not in options:
            msg = "\n\n".join([
                cpu_cs, cpu_ps, cpu_ts, mem_vs, mem_ss, open_fs, disk_us,
                net_ios, boot_s
            ])
        elif args[0].lower() == 'cpu':
            msg = "\n" + "\n\n".join([cpu_cs, cpu_ps, cpu_ts])
        elif args[0].lower() == 'memory':
            msg = "\n" + "\n\n".join([mem_vs, mem_ss])
        elif args[0].lower() == 'file':
            msg = "\n" + open_fs
        elif args[0].lower() == 'disk':
            msg = "\n" + disk_us
        elif args[0].lower() == 'network':
            msg = "\n" + net_ios
        elif args[0].lower() == 'boot':
            msg = "\n" + boot_s
        await self._say(ctx, msg)
        return
Example 19
    remotemeThread.daemon = False
    remotemeThread.start()
    time.sleep(1)
    parent = psutil.Process(psutil.Process().pid)
    children = parent.children(recursive=True)
    for process in children:
        if (process.name()=='remoteme'):
            remotemeThreadId=process.pid
            printBLUE("Remoteme run with pid:{}".format(remotemeThreadId))



#for pair in getProcess("stream\|python\|0"):
 #   print(pair)

printOK("Running remoteme Manager my pid is :{}".format(psutil.Process().pid))

signal.signal(signal.SIGUSR1, handler)


try:


    for x in range(0, 10000):
        addExisitingChildrensOfRemoteme()
        if (remotemeThread and remotemeThread.is_alive()):
            pass #print("remoteme me pid {} ".format(remotemeThreadId))
        else:
            runRemoteme()

Example 20
def main(args):
    _authurl = "http://" + args.get('url') + ":8080/auth/v1.0"
    conn = Connection(authurl=_authurl,
                      user=_user,
                      key=_key,
                      tenant_name=_tenant_name,
                      auth_version=_auth_version)
    #Perfs counters
    container_name = "expe-faas"
    process = None

    if MEMORY_USAGE:
        process = psutil.Process(os.getpid())

    end_time = 0
    extract_time_start = extract_time_stop = 0
    transform_time_start = transform_time_stop = 0
    load_time_start = load_time_stop = 0

    start_time = perf_counter()  #Get program starttime

    rand_images = [
        '1KB.jpg', '16KB.jpg', '32KB.jpg', '64KB.jpg', '126KB.jpg',
        '257KB.jpg', '517KB.jpg', '1.3MB.jpg', '2MB.jpg', '3.2MB.jpg'
    ]
    random.seed()

    image, threshold, softness = rand_images[random.randrange(
        0,
        len(rand_images) - 1, 1)], args['threshold'], args['softness']
    extract_time_start = perf_counter()  #Get extract phase starttime
    _, imgstream = conn.get_object(container_name, image)
    extract_time_stop = perf_counter()  #End recording the extract phase
    transform_time_start = perf_counter()
    with Image(blob=imgstream) as img:
        img.wavelet_denoise(threshold=threshold * img.quantum_range,
                            softness=softness)
        outputsize = len(img.make_blob('jpg'))
        #Change this to persist to S3 with a given bucket
        img.save(filename='out.jpg')
        transform_time_stop = perf_counter()
        load_time_start = perf_counter()
        with open('out.jpg', 'rb') as local:
            conn.put_object(container_name,
                            'resDenoise' + str(random.randrange(0, 100, 2)) +
                            '.jpg',
                            contents=local,
                            content_type="image/jpeg")
        load_time_stop = perf_counter()

    end_time = perf_counter()
    # print (
    #     {
    #         'outputsize': outputsize,
    #         'elapsed_time' : end_time - start_time,
    #         'extract_time' : extract_time_stop - extract_time_start,
    #         'transform_time' : transform_time_stop - transform_time_start,
    #         'load_time' : load_time_stop - load_time_start,
    #         'memory_usage' :  process.memory_info()[0] >> 20 if MEMORY_USAGE else 'Not defined'

    #     }
    # )

    return {
        'outputsize':
        outputsize,
        'elapsed_time':
        end_time - start_time,
        'extract_time':
        extract_time_stop - extract_time_start,
        'transform_time':
        transform_time_stop - transform_time_start,
        'load_time':
        load_time_stop - load_time_start,
        'memory_usage':
        process.memory_info()[0] >> 20 if MEMORY_USAGE else 'Not defined'
    }
Example 21
 def __init__(self, filename='pidfile'):
     self._process_name = psutil.Process(os.getpid()).cmdline()[0]
     self._file = filename
Example 22
 def get_memory_usage(self):
     return psutil.Process().memory_info().rss / 1024
Example 23
def main():

    machine_infos = tools.get_machine_infos()
    processes_to_monitor_pid = []
    processes_to_monitor = []

    while True:
        time.sleep(config.WAITING_TIME * 60)
        current_time = time.time()
        gpus_info = tools.gpus_snap_info()
        driver_version = gpus_info["driver_version"]
        news = ""
        send_info = False
        new_processes_count = 0
        died_processes_count = 0
        for index, gpu in enumerate(gpus_info["gpu"]):
            gpu_name = gpu["product_name"]
            processes = gpu["processes"]
            if processes == "N/A":
                news += f"Nothing is running on GPU {index} ({gpu_name})\n"
                continue
            else:
                for p in processes:
                    pid = p["pid"]
                    p_info = tools.process_info(pid)
                    if (current_time - p_info["since"] >=
                            config.PROCESS_AGE * 60
                            and not pid in processes_to_monitor_pid):
                        send_info = True
                        new_processes_count += 1
                        news += f"""

                        ---------------------------------------------------------------------------------------------------------------
                        A new process (PID : {pid}) has been launched on GPU {index} ({gpu_name}) by {p_info['owner']} since {datetime.datetime.fromtimestamp(int(p_info['since'])).strftime("%d/%m/%Y %H:%M:%S")}
                        Its owner ({p_info['owner']}) executed the following command:
                            {' '.join(p_info['executed_cmd'])}
                        From :
                            {p_info['from']}

                        CPU Status (currently):
                            For this process : {p_info['cpu_core_required']}

                        GPU Status (currently):
                            - Used memory (for this process): {p['used_memory']} / {gpu['fb_memory_usage']['total']} {gpu['fb_memory_usage']['unit']} ({round(p['used_memory']/gpu['fb_memory_usage']['total']*100,2)} % used)
                            - Used memory (for all processes running on this GPU) {gpu['fb_memory_usage']['used']} / {gpu['fb_memory_usage']['total']} {gpu['fb_memory_usage']['unit']} ({round(gpu['fb_memory_usage']['used']/gpu['fb_memory_usage']['total']*100,2)} % used)
                            - Temperature : {gpu["temperature"]["gpu_temp"]} Celsius
                            - Driver version : {driver_version}
                        ---------------------------------------------------------------------------------------------------------------


                        """
                        processes_to_monitor.append(p_info)
                        processes_to_monitor_pid.append(pid)
                    else:
                        continue

        for p in processes_to_monitor[:]:
            pid = p["pid"]
            try:
                still_running_p = psutil.Process(pid)
                continue
            except psutil.NoSuchProcess:
                send_info = True
                died_processes_count += 1
                news += f"""

                    ---------------------------------------------------------------------------------------------------------------
                    The process (PID : {pid}) launched by {p['owner']} since {datetime.datetime.fromtimestamp(int(p['since'])).strftime("%d/%m/%Y %H:%M:%S")} has ended.
                    Its owner {p['owner']} executed the following command:
                        {' '.join(p['executed_cmd'])}
                    From :
                        {p['from']}

                    The process took {datetime.timedelta(seconds=int(current_time)-int(p['since']))} to finish.
                    ---------------------------------------------------------------------------------------------------------------


                   """
                processes_to_monitor.remove(p)
                processes_to_monitor_pid.remove(pid)

        subject = None

        if new_processes_count > 0:
            subject = f"{new_processes_count} processes running on {machine_infos['MACHINE_NAME']} ({machine_infos['LOCAL_IP']})"
        elif died_processes_count > 0:
            subject = f"{died_processes_count} processes died on {machine_infos['MACHINE_NAME']} ({machine_infos['LOCAL_IP']})"
        else:
            subject = "Error"

        now = datetime.datetime.now()
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
        global_message = f"""                

            New events (triggered on the {dt_string}):
            {news}

            This message has been automatically sent by a robot. Please do not reply to this email.

            Please feel free to open a merge request on github.com/araison12/gpus_monitor if you encounter a bug or want to share ideas for improving this tool :)

        """

        if send_info:
            for person in config.persons_to_inform():
                tools.send_mail(subject, global_message, person)
Example 24
def run(args):
    # Set a relatively low cap on max open sessions, so we can saturate it in a reasonable amount of time
    args.max_open_sessions = 100
    args.max_open_sessions_hard = args.max_open_sessions + 20

    # Chunk often, so that new fds are regularly requested
    args.ledger_chunk_bytes = "500B"

    with infra.network.network(
        args.nodes, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
    ) as network:
        check = infra.checker.Checker()
        network.start_and_join(args)
        primary, _ = network.find_nodes()

        primary_pid = primary.remote.remote.proc.pid

        initial_fds = psutil.Process(primary_pid).num_fds()
        assert (
            initial_fds < args.max_open_sessions
        ), f"Initial number of file descriptors has already reached session limit: {initial_fds} >= {args.max_open_sessions}"

        num_fds = initial_fds
        LOG.success(f"{primary_pid} has {num_fds} open file descriptors")

        initial_metrics = get_session_metrics(primary)
        assert initial_metrics["active"] <= initial_metrics["peak"], initial_metrics
        assert initial_metrics["soft_cap"] == args.max_open_sessions, initial_metrics
        assert (
            initial_metrics["hard_cap"] == args.max_open_sessions_hard
        ), initial_metrics

        def create_connections_until_exhaustion(target, continue_to_hard_cap=False):
            with contextlib.ExitStack() as es:
                clients = []
                LOG.success(f"Creating {target} clients")
                consecutive_failures = 0
                for i in range(target):
                    logs = []
                    try:
                        clients.append(
                            es.enter_context(
                                primary.client("user0", connection_timeout=1)
                            )
                        )
                        r = clients[-1].post(
                            "/app/log/private",
                            {"id": 42, "msg": "foo"},
                            log_capture=logs,
                        )
                        if r.status_code == http.HTTPStatus.OK:
                            check(
                                r,
                                result=True,
                            )
                            consecutive_failures = 0
                        elif r.status_code == http.HTTPStatus.SERVICE_UNAVAILABLE:
                            if continue_to_hard_cap:
                                consecutive_failures = 0
                                continue
                            raise RuntimeError(r.body.text())
                        else:
                            flush_info(logs)
                            raise ValueError(
                                f"Unexpected response status code: {r.status_code}"
                            )
                    except (CCFConnectionException, RuntimeError) as e:
                        flush_info(logs)
                        LOG.warning(f"Hit exception at client {i}: {e}")
                        clients.pop(-1)
                        if consecutive_failures < 5:
                            # Maybe got unlucky and tried to create a session while many files were open - keep trying
                            consecutive_failures += 1
                            continue
                        else:
                            # Ok you've really hit a wall, stop trying to create clients
                            break
                else:
                    raise AllConnectionsCreatedException(
                        f"Successfully created {target} clients without exception - expected this to exhaust available connections"
                    )

                num_fds = psutil.Process(primary_pid).num_fds()
                LOG.success(
                    f"{primary_pid} has {num_fds}/{max_fds} open file descriptors"
                )

                r = clients[-1].get("/node/metrics")
                assert r.status_code == http.HTTPStatus.OK, r.status_code
                peak_metrics = r.body.json()["sessions"]
                assert peak_metrics["active"] <= peak_metrics["peak"], peak_metrics
                assert peak_metrics["active"] >= len(clients), (
                    peak_metrics,
                    len(clients),
                )

                # Submit many requests, and at least enough to trigger additional snapshots
                more_requests = max(len(clients) * 3, args.snapshot_tx_interval * 2)
                LOG.info(
                    f"Submitting an additional {more_requests} requests from existing clients"
                )
                for _ in range(more_requests):
                    client = random.choice(clients)
                    logs = []
                    try:
                        client.post(
                            "/app/log/private",
                            {"id": 42, "msg": "foo"},
                            timeout=1,
                            log_capture=logs,
                        )
                    except Exception as e:
                        flush_info(logs)
                        LOG.error(e)
                        raise e

                time.sleep(1)
                num_fds = psutil.Process(primary_pid).num_fds()
                LOG.success(
                    f"{primary_pid} has {num_fds}/{max_fds} open file descriptors"
                )

                LOG.info("Disconnecting clients")
                clients = []

            time.sleep(1)
            num_fds = psutil.Process(primary_pid).num_fds()
            LOG.success(f"{primary_pid} has {num_fds}/{max_fds} open file descriptors")
            return num_fds

        # For initial safe tests, we have many more fds than the maximum sessions, so file operations should still succeed even when network is saturated
        max_fds = args.max_open_sessions + (initial_fds * 2)
        resource.prlimit(primary_pid, resource.RLIMIT_NOFILE, (max_fds, max_fds))
        LOG.success(f"Setting max fds to safe initial value {max_fds} on {primary_pid}")

        nb_conn = (max_fds - num_fds) * 2
        num_fds = create_connections_until_exhaustion(nb_conn)

        to_create = max_fds - num_fds + 1
        num_fds = create_connections_until_exhaustion(to_create)

        try:
            create_connections_until_exhaustion(to_create, True)
        except AllConnectionsCreatedException as e:
            # This is fine! The soft cap means this test no longer reaches the hard cap.
            # It gets HTTP errors but then _closes_ sockets, fast enough that we never hit the hard cap
            pass

        final_metrics = get_session_metrics(primary)
        assert final_metrics["active"] <= final_metrics["peak"], final_metrics
        assert final_metrics["peak"] > initial_metrics["peak"], (
            initial_metrics,
            final_metrics,
        )
        assert final_metrics["peak"] >= args.max_open_sessions, final_metrics
        assert final_metrics["peak"] < args.max_open_sessions_hard, final_metrics

        # Now set a low fd limit, so network sessions completely exhaust them - expect this to cause failures
        max_fds = args.max_open_sessions // 2
        resource.prlimit(primary_pid, resource.RLIMIT_NOFILE, (max_fds, max_fds))
        LOG.success(f"Setting max fds to dangerously low {max_fds} on {primary_pid}")

        try:
            num_fds = create_connections_until_exhaustion(to_create)
        except Exception as e:
            LOG.warning(
                f"Node with only {max_fds} fds crashed when allowed to created {args.max_open_sessions} sessions, as expected"
            )
            LOG.warning(e)
            network.ignore_errors_on_shutdown()
        else:
            raise RuntimeError("Expected a fatal crash and saw none!")
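
A tiny standalone sketch of the two primitives this test combines, applied to the current process instead of a CCF node (Linux-only: psutil's num_fds() is POSIX and resource.prlimit() is Linux-specific):

import os
import resource
import psutil

pid = os.getpid()
print("open fds:", psutil.Process(pid).num_fds())
soft, hard = resource.prlimit(pid, resource.RLIMIT_NOFILE)  # read current limits without changing them
print("fd limits:", soft, hard)
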
Example 25
def process_memory_used():
    """Return memory used by current process."""
    memory_info = psutil.Process().memory_full_info()

    # use RSS as basis for memory metric
    return memory_info.rss
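
A small follow-up sketch comparing RSS with USS from the same memory_full_info() call; USS is only reported on some platforms, which is what the HAS_MEMORY_FULL_INFO feature check earlier on this page guards against:

import psutil

info = psutil.Process().memory_full_info()
print("rss MiB:", info.rss / 2**20)
if hasattr(info, "uss"):
    print("uss MiB:", info.uss / 2**20)  # memory unique to this process
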
Example 26
        def create_connections_until_exhaustion(target, continue_to_hard_cap=False):
            with contextlib.ExitStack() as es:
                clients = []
                LOG.success(f"Creating {target} clients")
                consecutive_failures = 0
                for i in range(target):
                    logs = []
                    try:
                        clients.append(
                            es.enter_context(
                                primary.client("user0", connection_timeout=1)
                            )
                        )
                        r = clients[-1].post(
                            "/app/log/private",
                            {"id": 42, "msg": "foo"},
                            log_capture=logs,
                        )
                        if r.status_code == http.HTTPStatus.OK:
                            check(
                                r,
                                result=True,
                            )
                            consecutive_failures = 0
                        elif r.status_code == http.HTTPStatus.SERVICE_UNAVAILABLE:
                            if continue_to_hard_cap:
                                consecutive_failures = 0
                                continue
                            raise RuntimeError(r.body.text())
                        else:
                            flush_info(logs)
                            raise ValueError(
                                f"Unexpected response status code: {r.status_code}"
                            )
                    except (CCFConnectionException, RuntimeError) as e:
                        flush_info(logs)
                        LOG.warning(f"Hit exception at client {i}: {e}")
                        clients.pop(-1)
                        if consecutive_failures < 5:
                            # Maybe got unlucky and tried to create a session while many files were open - keep trying
                            consecutive_failures += 1
                            continue
                        else:
                            # Ok you've really hit a wall, stop trying to create clients
                            break
                else:
                    raise AllConnectionsCreatedException(
                        f"Successfully created {target} clients without exception - expected this to exhaust available connections"
                    )

                num_fds = psutil.Process(primary_pid).num_fds()
                LOG.success(
                    f"{primary_pid} has {num_fds}/{max_fds} open file descriptors"
                )

                r = clients[-1].get("/node/metrics")
                assert r.status_code == http.HTTPStatus.OK, r.status_code
                peak_metrics = r.body.json()["sessions"]
                assert peak_metrics["active"] <= peak_metrics["peak"], peak_metrics
                assert peak_metrics["active"] >= len(clients), (
                    peak_metrics,
                    len(clients),
                )

                # Submit many requests, and at least enough to trigger additional snapshots
                more_requests = max(len(clients) * 3, args.snapshot_tx_interval * 2)
                LOG.info(
                    f"Submitting an additional {more_requests} requests from existing clients"
                )
                for _ in range(more_requests):
                    client = random.choice(clients)
                    logs = []
                    try:
                        client.post(
                            "/app/log/private",
                            {"id": 42, "msg": "foo"},
                            timeout=1,
                            log_capture=logs,
                        )
                    except Exception as e:
                        flush_info(logs)
                        LOG.error(e)
                        raise e

                time.sleep(1)
                num_fds = psutil.Process(primary_pid).num_fds()
                LOG.success(
                    f"{primary_pid} has {num_fds}/{max_fds} open file descriptors"
                )

                LOG.info("Disconnecting clients")
                clients = []

            time.sleep(1)
            num_fds = psutil.Process(primary_pid).num_fds()
            LOG.success(f"{primary_pid} has {num_fds}/{max_fds} open file descriptors")
            return num_fds
from torch.utils.data import DataLoader

from models.rnn_dynamics import RNNDynamics
from models.interaction_dynamics import InteractionDynamics
from trainers.dynamics_dataset import DynamicsDataset
from trainers.trainable_derender import Nan_Exception, create_new_timer
from utils.build import make_lr_scheduler, make_optimizer
from utils.checkpoint import Checkpointer
import logging as log

from utils.metric_logger import MetricLogger
from utils.misc import to_cuda
import torch.nn.functional as F
import json

proc_id = psutil.Process(os.getpid())


def build_dynamics_model(cfg, train_dataset):
    if cfg.MODEL.ARCHITECTURE == "rnn":
        return RNNDynamics(cfg, train_dataset)
    elif cfg.MODEL.ARCHITECTURE == "interaction":
        return InteractionDynamics(cfg, train_dataset)


class TrainableDynamics:
    def __init__(self,cfg):
        # TEST = ("intphys_dev-meta_O1",
        #         "intphys_dev-meta_O2",
        #         "intphys_dev-meta_O3")
        val_datasets = {k: DynamicsDataset(cfg, cfg.DATASETS.TEST,k)
Example 28
print(psutil.swap_memory())

# Disk information
print(psutil.disk_partitions())  # disk partitions
print(psutil.disk_usage('D:'))  # disk usage
print(psutil.disk_io_counters())  # disk I/O counters

# Network information
print(psutil.net_io_counters())  # bytes/packets sent and received
print(psutil.net_if_addrs())  # network interface addresses
print(psutil.net_if_stats())  # network interface status
print(psutil.net_connections())  # current network connections

# Process information
print(psutil.pids())  # all process IDs
p = psutil.Process(12196)  # attach to the process with PID 12196
print(p.name())  # process name
print(p.exe())  # path of the executable
print(p.cwd())  # working directory
print(p.cmdline())  # command line the process was started with
print(p.ppid())  # parent process ID
print(p.parent())  # parent process
print(p.children())  # child processes
print(p.status())  # process status
print(p.username())  # owning user
print(p.create_time())  # creation time
# print(p.terminal())  # controlling terminal
print(p.cpu_times())  # CPU times used by the process
print(p.memory_info())  # memory used by the process
print(p.open_files())  # files opened by the process
print(p.connections())  # network connections of the process
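
A more defensive variant of the attach-by-PID pattern above (a sketch; 12196 is just the PID used in this snippet): handle the process disappearing or being inaccessible instead of letting the exceptions propagate.

try:
    p = psutil.Process(12196)
    print(p.name(), p.status())
except psutil.NoSuchProcess:
    print("no such process")
except psutil.AccessDenied:
    print("access denied")
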
Example 29
def test_execute_kill_children():
    pid = execute.python(kill_children=True)(create_sleeper_subprocess)()
    with pytest.raises(psutil.NoSuchProcess):
        psutil.Process(pid)
Example 30
def print_cpu_memory():
    pid = os.getpid()
    py = psutil.Process(pid)
    memoryUse = py.memory_info()[0] / 2. ** 30  # memory use in GB...I think
    print('memory use:', memoryUse)