def register_memory_usage():
    """Append this process's peak-memory statistics to the tortoise memory log.

    Reads the module-level globals ``memfile_path``, ``sizs``, ``bibs`` and
    ``idx`` (presumably set by the scheduling code before the worker runs —
    TODO confirm against the caller) and writes one record via
    ``print_tortoise_memory_log``.

    NOTE(review): this function is defined twice in this module with the
    same body; the later definition shadows this one at import time.
    """
    pid = os.getpid()
    peak = get_peak_mem()  # expected to be a 2-element sequence (peak1, peak2)
    # 'with' guarantees the log file is closed even if the logger raises;
    # the original manual open()/close() pair leaked the handle on error.
    with open(memfile_path, 'a') as fp:
        print_tortoise_memory_log(
            {'pid': pid,
             'peak1': peak[0],
             'peak2': peak[1],
             'est': sizs[idx],
             'bibs': bibs[idx]},
            fp)
def register_memory_usage():
    """Append this process's peak-memory statistics to the tortoise memory log.

    Reads the module-level globals ``memfile_path``, ``sizs``, ``bibs`` and
    ``idx`` (presumably set by the scheduling code before the worker runs —
    TODO confirm against the caller) and writes one record via
    ``print_tortoise_memory_log``.

    NOTE(review): this is a verbatim duplicate of the definition immediately
    above; this second definition is the one that takes effect. The
    duplicate should probably be removed.
    """
    pid = os.getpid()
    peak = get_peak_mem()  # expected to be a 2-element sequence (peak1, peak2)
    # 'with' guarantees the log file is closed even if the logger raises;
    # the original manual open()/close() pair leaked the handle on error.
    with open(memfile_path, 'a') as fp:
        print_tortoise_memory_log(
            {'pid': pid,
             'peak1': peak[0],
             'peak2': peak[1],
             'est': sizs[idx],
             'bibs': bibs[idx]},
            fp)
def schedule(job, args, sizs):
    """Run `job` once per entry of `args` in forked worker processes,
    admitting workers only while their estimated RAM fits in the budget.

    :param job:  callable executed in each child as job(*args[i])
    :param args: list of argument tuples; consumed (entries are deleted)
                 as jobs are dispatched — callers should not reuse it
    :param sizs: list parallel to `args` with per-job size figures, fed
                 through estimate_ram_usage() to get RAM estimates

    Jobs whose estimate exceeds the total budget are run first, one at a
    time; the rest are packed greedily (biggest-that-fits) up to
    get_cores_count() concurrent workers. Aborts (assert) if any worker
    exits with a non-zero status.
    """
    assert len(args) == len(sizs)
    max_workers = get_cores_count()
    # pid -> (reserved RAM estimate, args tuple) for live workers
    pid_2_size = {}
    #free = get_free_memory()
    # Budget is total (not currently-free) memory — deliberate per the
    # commented-out line above; presumably to make scheduling
    # deterministic. TODO confirm.
    free = get_total_memory()
    bibs = sizs  # keep the raw size figures for logging; sizs is rebound below
    initialize_ram_estimation()
    # Python 2: map returns a list, so sizs stays indexable/deletable.
    sizs = map(estimate_ram_usage, sizs)
    if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
        clear_tortoise_memory_log()
    # Jobs too big to ever fit the budget: run each alone, sequentially.
    # Indices sorted in reverse so the del-by-index below stays valid.
    too_big = sorted((idx for idx in xrange(len(sizs)) if sizs[idx] > free), reverse=True)
    for idx in too_big:
        pid = os.fork()
        if pid == 0: # child
            job(*args[idx])
            if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
                pid = os.getpid()
                print_tortoise_memory_log(
                    {'pid' : pid,
                     'peak' : get_peak_mem(),
                     'est' : sizs[idx],
                     'bibs' : bibs[idx]})
            # _exit: skip parent's cleanup handlers in the forked child
            os._exit(0)
        else: # parent
            # Safe to delete before waiting: the child already has its own
            # copy-on-write view of args/sizs/bibs from the fork.
            del args[idx]
            del sizs[idx]
            del bibs[idx]
            cpid, status = os.wait()
            assert cpid == pid
    # Main loop: keep spawning workers that fit, then reap one at a time.
    while args or pid_2_size:
        # Fill worker slots with the biggest job that fits the remaining budget.
        while len(pid_2_size) < max_workers:
            idx = get_biggest_below(free, sizs)
            if idx != -1:
                pid = os.fork()
                if pid == 0: # child
                    job(*args[idx])
                    if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
                        pid = os.getpid()
                        print_tortoise_memory_log(
                            {'pid' : pid,
                             'peak' : get_peak_mem(),
                             'est' : sizs[idx],
                             'bibs' : bibs[idx]})
                    os._exit(0)
                else: # parent
                    # Reserve the estimated RAM for this worker and remove
                    # the job from the pending lists.
                    pid_2_size[pid] = (sizs[idx], args[idx])
                    assert free > sizs[idx]
                    free -= sizs[idx]
                    del args[idx]
                    del sizs[idx]
                    del bibs[idx]
            else:
                # Nothing pending fits in the current budget; wait for a
                # worker to finish and release memory.
                break
        # NOTE(review): if no worker is live here (pid_2_size empty because
        # nothing fit on the very first pass), os.wait() would raise OSError
        # (no children) — looks unreachable given too_big filtering, but
        # worth confirming.
        pid, status = os.wait()
        assert pid in pid_2_size
        freed, name = pid_2_size[pid]
        if status != 0:
            import sys
            print >> sys.stderr, "Worker %s died." % str(name)
            sys.stderr.flush()
            # Fail hard: a dead worker means lost work and an unreliable run.
            assert False
        # Return the worker's RAM reservation to the budget.
        free += freed
        del pid_2_size[pid]
    # All jobs dispatched and reaped; no workers may remain.
    assert not pid_2_size