Example #1
 def test_native_iterparse(self):
     """Displays time and memory used for empty native parsing"""
     for _event, _element in ElementTree.iterparse(self.file,
                                               events=('start', 'end')):
         # Do something here
         pass
     print guppy.hpy().heap()
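Every snippet in this collection follows the same basic pattern: create a heapy entry point with hpy() and print a heap snapshot. A minimal standalone sketch of that pattern is below; note that these snippets are Python 2, while on Python 3 the same API is provided by the guppy3 package (pip install guppy3).

from guppy import hpy

h = hpy()
heap = h.heap()   # snapshot of every reachable object
print(heap)       # partition of the heap, largest object class first
print(heap.size)  # total size of the snapshot in bytes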
Example #2
 def start(self):
   try:
     if self.dry_run == False:
       # Attempt to connect to the printer
       self.do_connect("")
       if self.p.printer == None: sys.exit(1)
       print "Connecting to printer..."
       # Wait for the attempt to succeed or timeout
       for x in range(0,50-1):
         if self.p.online == True: break
         sys.stdout.write(".")
         sys.stdout.flush()
         time.sleep(0.1)
       print ""
       if self.p.online == False:
         print "Unable to connect to printer: Connection timed-out."
         sys.exit(1)
       # Wait for the printer to finish connecting and then reset it
       time.sleep(2)
       self.reset()
     # Start the server, display the startup message and start the ioloop
     self.server.start()
     self.display_startup_message()
     self.server.ioloop.start()
   except Exception as ex:
     print traceback.format_exc()
     if args.heaptrace: print hpy().heap()
     self.p.disconnect()
     exit()
Example #3
def move_label(dt, data):
	if dt > 5:
		print '----- stuttered %d seconds: heap dump -----'%dt
		print guppy.hpy().heap()
		data.galaxy_window.window.close()

	global x_direction, y_direction, x_move, y_move
	# for this test, the origin is the first named star
	data.galaxy_window.range_origin_star = data.galaxy_objects.named_stars[0]
	data.galaxy_window.range_origin_coordinate = data.galaxy_objects.named_stars[0].coordinates
	gw = data.galaxy_window.window
	gw._mouse_x += x_move*x_direction
	gw._mouse_y += y_move*y_direction
	if (gw._mouse_x > 399):
		gw._mouse_x = 399
		x_direction = x_direction * -1
		x_move = random.randint(1,10)
	elif (gw._mouse_x < 0):
		gw._mouse_x = 0
		x_direction = x_direction * -1
		x_move = random.randint(1,10)
	if (gw._mouse_y > 399):
		gw._mouse_y = 399
		y_direction = y_direction * -1
		y_move = random.randint(1,10)
	elif (gw._mouse_y < 0):
		gw._mouse_y = 0
		y_direction = y_direction * -1
		y_move = random.randint(1,10)
	gw._mouse_in_window = True
	data.galaxy_window.set_range_info()
Example #4
 def method(self, name, *args):
     if guppy:
         # We instantiate the heapy environment here
         # so that the memory it consumes doesn't hang
         # around for the whole process
         hpy().heap().stat.dump(self.path)
     else:
         logger.error("guppy is not availabe, cannot log memory usage!")
Example #5
 def on_key_up(self, key,
         scancode    = None,
         codepoint   = None,
         modifier    = None,
         **kwargs):
     if key == 104:
         import guppy
         print guppy.hpy().heap()
Example #6
 def wrapper(*args, **kwargs):
     gc.disable()
     start = hpy().heap().size
     res = func(*args, **kwargs)
     end = hpy().heap().size
     gc.enable()
     mem_log(name=func.__name__, totmembytes=end, memdiffbytes=(end-start), args=self.arg_extractor(*args, **kwargs))
     return res
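The wrapper above is the body of a memory-logging decorator from its original codebase; mem_log and self.arg_extractor are defined elsewhere there. A self-contained sketch of the same idea, with the logging reduced to a plain print:

import functools
import gc
from guppy import hpy

def log_heap_usage(func):
    # Simplified stand-in for the original mem_log-based wrapper.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        gc.disable()                # keep collections from skewing the two snapshots
        start = hpy().heap().size
        res = func(*args, **kwargs)
        end = hpy().heap().size
        gc.enable()
        print('%s: %d bytes total, %+d bytes during call' % (func.__name__, end, end - start))
        return res
    return wrapper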
Example #7
def __citations_from_file(input_file, should_profile):
    """
    Generator function that yields the paper index and citations for each entry.
    """

    # Tokens for parsing
    index_token = '#index'
    citation_token = '#%'

    # Predicates for error checking
    none_none = lambda *items: all([item is not None for item in items])
    all_none = lambda *items: all([item is None for item in items])

    # Next entry data
    index = None
    citations = []

    # Global stats
    global papers_with_references
    total_papers = 0

    for line in input_file:
        line = line.strip()

        # Parse entry, enforcing that data appears in index -> citations order
        if line.startswith(index_token):
            assert all_none(index) and len(citations) == 0
            index = int(line[len(index_token):])

        elif line.startswith(citation_token):
            assert none_none(index)
            new_citation_id = int(line[len(citation_token):])
            assert new_citation_id >= 0
            citations.append(new_citation_id)

        elif len(line) == 0:
            total_papers += 1

            # Yield this entry if it has any citations,
            if none_none(index):
                if len(citations) > 0:
                    papers_with_references += 1
                    yield index, citations

            index = None
            citations = []

    # Dump the memory usage if we should
    if should_profile:
        print "\n\nMemory Usage After Parsing Document Citations:"
        from guppy import hpy
        print hpy().heap()
Example #8
def show_dict_mem_usage(dict_path=None, verbose=False):
    """
    Show dictionary memory usage.
    """
    initial_mem = get_mem_usage()
    initial_time = time.time()

    morph = pymorphy2.MorphAnalyzer(dict_path)

    end_time = time.time()
    mem_usage = get_mem_usage()

    logger.info(
        "Memory usage: %0.1fM dictionary, %0.1fM total (load time %0.2fs)",
        (mem_usage - initial_mem) / (1024 * 1024),
        mem_usage / (1024 * 1024),
        end_time - initial_time,
    )

    if verbose:
        try:
            from guppy import hpy

            hp = hpy()
            logger.debug(hp.heap())
        except ImportError:
            logger.warn("guppy is not installed, detailed info is not available")
Example #9
def random_sort3(n):
    hp = hpy()
    print "Heap at the beginning of the functionn", hp.heap()
    l = [random.random() for i in range(n)]
    l.sort()
    print "Heap at the end of the functionn", hp.heap()
    return l
Example #10
	def process_response(self, request, response):   	
		if request.GET.has_key('prof'):
			h = hpy()
			mem_profile = h.heap()
			pd = ProfilerData(
				view = request.path,
				)
			
			self.prof.close()

			out = StringIO()
			old_stdout = sys.stdout
			sys.stdout = out

			stats = hotshot.stats.load(self.tmpfile.name)
			#stats.strip_dirs()
			stats.sort_stats('cumulative')
			stats.print_stats()

			sys.stdout = old_stdout
			stats_str = out.getvalue()

			if response and response.content and stats_str:
				response.content = "<h1>Instance wide RAM usage</h1><pre>%s</pre><br/><br/><br/><h1>CPU Time for this request</h1><pre>%s</pre>" % (
					mem_profile, stats_str
					)
				
			pd.profile = "Instance wide RAM usage\n\n%s\n\n\nCPU Time for this request\n\n%s" % (mem_profile, stats_str)
			pd.save()
		return response
Example #11
 def do_heapy(self):
   from guppy import hpy
   self.send_response(200)
   self.send_header('Content-Type', 'text/plain')
   self.send_header('Cache-Control', 'no-cache')
   self.end_headers()
   self.wfile.write(hpy().heap())
Example #12
 def _init_leak(self):
     if not self.leak: return
     if self.heap: return
     if self.gc: gc.set_debug(gc.DEBUG_LEAK)
     if guppy:
         self.heap = guppy.hpy()
         self.heap.setrelheap()
Example #13
 def on_process_start(self, task):
     if not task.manager.options.mem_usage:
         return
     # start only once
     if self.heapy:
         return
     self.heapy = hpy()
Example #14
    def _init_debug_mode(self):
        """
        """
        try:
            import resource
            import guppy
            import signal

            print '# DBG MODE: Debug Mode active'
            print '# DBG MODE: Initial memory usage : %f (MB)' % (float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 1024 / 1024)
            print '# DBG MODE: Collecting initial heap snapshot...'
            hp = guppy.hpy()
            heap_initial = hp.heap()

            heap_initial  # make pep8 happy

            def handle_signal(signal_number, frame_stack):
                self._launch_debug_mode()

            signal.signal(signal.SIGHUP, handle_signal)

            print '# DBG MODE: Initial heap snapshot collected'
            print '# DBG MODE: Do a `kill -HUP %s` to enter the debug mode at anytime' % os.getpid()
            print '# DBG MODE: Hitting CTRL+C stops Garuda then enters the debug mode.'
        except:
            print '# DBG MODE: Cannot use Debugging Mode. Modules needed: `ipdb`, `resource`, `objgraph` and `guppy`'
            self._debug = False
        finally:
            print ''
Example #15
def dump_memory_info(stream = None):
    close = False
    if not stream:
        stream = open(_MEMORY_LOG_PATH, 'a')
        close = True
    if HEAPY:
        hp = hpy().heap()
        stream.write('heap:\n')
        stream.write('{0}\n'.format(hp))
        stream.write('heap.byrcs:\n')
        stream.write('{0}\n'.format(hp.byrcs))
        stream.write('heap[0].byrcs:\n')
        stream.write('{0}\n'.format(hp[0].byrcs))
        stream.write('heap[0].byid:\n')
        stream.write('{0}\n'.format(hp[0].byid))
        stream.write('heap[0].byvia:\n')
        stream.write('{0}\n'.format(hp[0].byvia))
        stream.write('heap[0].byrcs[0].referrers.byrcs:\n')
        stream.write('{0}\n'.format(hp[0].byrcs[0].referrers.byrcs))
        stream.write('heap[0].byrcs[0].referrers.byrcs[0].referents:\n')
        stream.write('{0}\n'.format(hp[0].byrcs[0].referrers.byrcs[0].referents))
        stream.write('heap[0].byrcs[0].referrers.byrcs[0].referents.byvia:\n')
        stream.write('{0}\n'.format(hp[0].byrcs[0].referrers.byrcs[0].referents.byvia))
    else:
        stream.write('hpy (heapy) from the guppy package is not available\n')
    if close:
        stream.close()
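The attribute chain used above (byrcs, byvia, byid, referrers, referents) is heapy's drill-down interface: every partition row is itself a set that can be re-partitioned or followed through the reference graph. A short sketch, assuming guppy is installed:

from guppy import hpy

hp = hpy()
data = [str(i) * 10 for i in range(1000)]  # give the heap something to show
h = hp.heap()
print(h.byrcs)        # re-partition by referrer classes
row = h[0]            # the largest row of the partition, itself a set
print(row.byvia)      # how these objects are reachable (attribute, index, ...)
print(row.referrers)  # the set of objects referring to them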
Example #16
def memory_trace_calls(frame, event, arg):
    global _LAST_MEMORY_LOG_TIME
    t = time.time()
    if t - _LAST_MEMORY_LOG_TIME < MEMORY_LOGGING_FREQUENCY:
        return
    _LAST_MEMORY_LOG_TIME = t
    if event != 'call':
        return
    co = frame.f_code
    func_name = co.co_name
    if func_name == 'write':
        return
    func_line_num = frame.f_lineno
    file_name = co.co_filename
    caller = frame.f_back
    caller_line_num = caller.f_lineno
    caller_file_name = caller.f_code.co_filename
    hp = hpy().heap()
    with open(_MEMORY_LOG_PATH, 'a') as out:
        out.write('Call to {0}: {1}: {2} (from {3}: {4})\n'.format(
            file_name,
            func_name,
            func_line_num,
            caller_file_name,
            caller_line_num))
        dump_memory_info(out)
        out.write('\n')
    if func_name in TRACE_LINES_INTO:
        # return the local trace function so the lines inside get traced too
        return memory_trace_lines
    return
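For context: a function like memory_trace_calls is installed with sys.settrace, and returning a local trace function from a 'call' event is what makes Python trace the individual lines inside that call, which is why functions listed in TRACE_LINES_INTO get memory_trace_lines returned for them. A stripped-down sketch:

import sys

def trace_calls(frame, event, arg):
    # pared down from memory_trace_calls above: no heap dump, just names
    if event != 'call':
        return None
    print('call to %s' % frame.f_code.co_name)
    return None  # return a local trace function here to trace line events too

sys.settrace(trace_calls)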
Example #17
    def test03_joinTable_guppy(self):
        import guppy, gc

        hpy = guppy.hpy()

        def f(ignored, last, first, i):
            gc.collect()
            next = hpy.heap()
            print "SINCE LAST TIME"
            print next - last
            print "SINCE FOREVER"
            print last - first
            serial = 0
            session = "session" + str(i)
            self.server_site.memcache.set(session, str(serial))
            headers = {"Cookie": "TWISTED_SESSION=" + session}
            d = client.getPage(
                "http://127.0.0.1:19481/POKER_REST",
                postdata='{"type":"PacketPokerTableJoin","game_id":1}',
                headers=headers,
            )
            d.addCallback(
                lambda x: client.getPage(
                    "http://127.0.0.1:19481/POKER_REST",
                    postdata='{"type":"PacketPokerTableQuit","game_id":1}',
                    headers=headers,
                )
            )
            d.addCallback(self.cleanMemcache)
            d.addCallback(f, next, first, i + 1)

        first = hpy.heap()
        i = 1
        f(None, first, first, i)
Example #18
 def do_GET(s):
     job = re.compile('^/jobs/[0-9]+$')
     joblist = re.compile('^/jobs/?$')
     if (s.path == '/stats/heap' and 'guppy' in sys.modules):
         s.send_response(200)
         s.send_header('Content-Type:', 'text/plain')
         s.end_headers()
         s.wfile.write(hpy().heap())
     elif job.match(s.path):
         s.send_response(200)
         s.send_header('Content-Type:', 'application/json')
         s.end_headers()
         id = int(s.path.split('/')[2])
         result = dict()
         result['command'] = s.server.hbq.commands[id].args
         result['status'] = s.server.hbq.commands[id].status
         result['stdout'] = s.server.hbq.commands[id].stdout
         s.wfile.write(json.dumps(result))
     elif joblist.match(s.path):
         s.send_response(200)
         s.send_header('Content-Type:', 'application/json')
         s.end_headers()
         result = dict()
         for i, cmd in enumerate([[x.args, x.status]
                                 for x in s.server.hbq.commands]):
             result[i] = dict(command=" ".join(cmd[0]), status=cmd[1])
         s.wfile.write(json.dumps(result))
     else:
         SimpleHTTPRequestHandler.do_GET(s)
Example #19
def main():
    F, J = build_forms()

    if 0:
        print '\n', '='*50, 'F'
        ei = process_form(F)

    if 1:
        print '\n', '='*50, 'J'
        ei = process_form(J)

    print '\n', '='*50, 'mem of only eiJ'
    del F
    del J
    printmem()

    find_the_memory_thief(ei.cell_integrals()[0].integrand())

    #print formatted_analysis(ei, classes=True)

    print
    print msize
    print mtime

    try:
        from guppy import hpy
        hp = hpy()
        print "heap:"
        print hp.heap()
    except:
        print "No guppy installed!"
Example #20
def main(options, args):
    my_addr = (options.ip, int(options.port))
    logs_path = options.path
    print 'Using the following plug-ins:'
    print '*', options.routing_m_file
    print '*', options.lookup_m_file
    print 'Private DHT name:', options.private_dht_name
    routing_m_name = '.'.join(os.path.split(options.routing_m_file))[:-3]
    routing_m_mod = __import__(routing_m_name, fromlist=[''])
    lookup_m_name = '.'.join(os.path.split(options.lookup_m_file))[:-3]
    lookup_m_mod = __import__(lookup_m_name, fromlist=[''])

    dht = pymdht.Pymdht(my_addr, logs_path,
                        routing_m_mod,
                        lookup_m_mod,
                        options.private_dht_name,
                        logs_level)
    
    print '\nType "exit" to stop the DHT and exit'
    print 'Type "help" if you need'
    while (1):
        input = sys.stdin.readline().strip().split()
        if not input:
            continue
        command = input[0]
        if command == 'help':
            print '''
Available commands are:
- help
- fast info_hash bt_port
- exit
- m                  Memory information
'''
        elif command == 'exit':
            dht.stop()
            break
        elif command == 'm':
            import guppy
            h = guppy.hpy()
            print h.heap()
        elif command == 'fast':
            if len(input) != 3:
                print 'usage: fast info_hash bt_port'
                continue
            try:
                info_hash = identifier.Id(input[1])
            except (identifier.IdError):
                print 'Invalid info_hash (%s)' % input[1]
            try:
                bt_port = int(input[2])
            except:
                print 'Invalid bt_port (%r)' % input[2]
                continue
            success, peers = dht.get_peers(time.time(), info_hash,
                                           _on_peers_found, bt_port)
            if not success:
                print 'Lookup failed'
            if peers:
                print '[local] %d peer(s)' % (len(peers))
                print peers
Example #21
 def test03_joinTable_guppy(self):
     import guppy, gc
     hpy = guppy.hpy()            
     def f(ignored, last, first, i):
         gc.collect()
         next = hpy.heap()
         print 'SINCE LAST TIME'
         print next - last
         print 'SINCE FOREVER'
         print last - first
         serial = 0
         session = 'session' + str(i)
         self.server_site.memcache.set(session, str(serial))
         headers = { 'Cookie': 'TWISTED_SESSION='+session }
         d = client.getPage("http://127.0.0.1:19481/POKER_REST", postdata = '{"type":"PacketPokerTableJoin","game_id":1}', headers = headers)
         d.addCallback(lambda x: client.getPage("http://127.0.0.1:19481/POKER_REST", postdata = '{"type":"PacketPokerTableQuit","game_id":1}', headers = headers))
         def cleanMemcache(x):
             pokermemcache.memcache_singleton.clear()
             pokermemcache.memcache_expiration_singleton.clear()
         d.addCallback(cleanMemcache)                  
         d.addCallback(f, next, first, i+1)
     first = hpy.heap()
     i = 1
     f(None, first, first, i)
     d = defer.Deferred()
     return d
Example #22
	def dumpheap(self):
		if hpy is not None:
			h = hpy()
			h.heap().dump(self.path+"_heap_"+str(self.current_job).replace("/","_")+".txt", "a")
			f = open(self.path+"_heap_"+str(self.current_job).replace("/","_")+"_str.txt","w+")
			f.write(str(h.heap()))
			f.close()
Example #23
def dumpMemoryUsage():
    '''
    This is a function that prints the memory usage of w3af in real time.
    @author: Andres Riancho ( [email protected] )
    '''
    if not DEBUGMEMORY:
        pass
    else:
        hpy = guppy.hpy()
        h = hpy.heap()

        byrcs = h.byrcs
        
        if isinstance( byrcs, guppy.heapy.UniSet.IdentitySetMulti ):
            om.out.debug( str(byrcs) )
            for i in xrange(10):
                om.out.debug( str(byrcs[i].byvia) )
            #om.out.debug( 'The one:' + repr(byrcs[0].byid[0].theone) )
        
        if DEBUGREFERENCES:
            for objMemoryUsage in gc.get_objects():
                ###
                ### Note: str objects CAN'T be analyzed this way. They can't create loops, so they aren't
                ### handled by the gc ( __cyclic__ garbage collector ).
                ###
                if isinstance( objMemoryUsage, fuzzableRequest.fuzzableRequest ):
                    om.out.debug('Objects of class fuzzableRequest are referenced by:' )
                    om.out.debug( str(hpy.iso(objMemoryUsage).sp) )
Example #24
    def get(self):
        log = self.application.logger
        try:
            memstats = {}
            import gc
            log.info("start GC collect")
            memstats["collected"] = gc.collect()
            log.info("GC has finished collecting")
            try:
                import objgraph
                memstats["most_common_types"] = dict(objgraph.most_common_types(100))
            except ImportError as err:
                log.warning("can't create objgraph: %s", err)

            try:
                from guppy import hpy
                hp = hpy()
                h = hp.heap()
                memstats["heap"] = str(h)
            except ImportError as err:
                log.warning("can't create heapdump: %s", err)

            self.write(memstats)
        except Exception as err:
            self.write("unable to generate memstats %s" % err)
Example #25
def getTotals():
    """The transcriptions can be done in chunks to enable parallelism. 
    This function combines the parts to get the total triphone count, assuming that 
    transcribed files have been saved as trans001.txt, trans002.txt, etc.
    """
    total = {}
    files = sorted(os.listdir("."))
    trans = []
    for i in files:
        if i[:5] == "trans" and i[-3:] == "txt":
            trans.append(i)
    ID = 0
    h = hpy()
    for i in range(7):
        print h.heap()
        sents, tot = makeSentenceList(trans[i], ID)
        ID = int(sents[-1][0]) + 1
        for k, v in tot.items():
            if total.has_key(k):
                total[k] += v
            else:
                total[k] = v
        del sents
        del tot
    return total
Example #26
 def _loop(self, args, opts):
     if settings.get('MEMDEBUG_WITH_GUPPY', False) and guppy:
         heapy = guppy.hpy()
         
     task = Task().next(locked=0, completed=0)
     if task:
         task.lock()
         cmd = ['python', os.path.join(os.getcwd(), 'scrapy-ctl.py'), 'run']
         cmd.append('--task-id=%s'%task.id)
         if opts.child_logfile:
             cmd.append('--logfile=%s'%opts.child_logfile)
             cmd.append('--child')
         task.start = datetime.now()
         process = subprocess.Popen(cmd, shell=False, stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)
         task.result, task.errors = process.communicate()
         task.finish = datetime.now()
         task.completed = 1
         task.save()
         timetext.LANG = 'en'
         total = task.finish - task.start
         log.msg('Finished: %s(%s) in %s'%(task.name, task.id, timetext.stringify(total)), level=log.INFO, domain=task.domain)
         if settings.get('MEMDEBUG_WITH_GUPPY', False) and guppy:
             log.msg(heapy.heap(), level=log.DEBUG)
             heapy.setref()
     else:
         time.sleep(30)
Example #27
def main():
    rbncls = rbnmol.rbnmol_total_sumzero
    
    if len(sys.argv) >= 2 and sys.argv[1] == "cached":
        rbncls = rbnmol_cached.rbnmol_cached_total_sumzero
        print "Using cached rbnmol class"
    
    time = 20.0
    seeds = 20
    copies = 25
    
    rng = random.Random(42)
    content = []
    for seed in xrange(seeds):
        content += [rbncls.generate(10, seed)]*copies

    
    
    rbnworld = AChemKit.sims_simple.AChemAbstract()
    rbnworld.noreactants = 2
    rbnworld.react = reaction.react

    #events = AChemKit.sims_simple.simulate_itterative_iter(rbnworld, self.content, time, rng)
    
    #events = AChemKit.sims_simple.simulate_stepwise_multiprocessing_iter(rbnworld, self.content, time, rng)
    #events = AChemKit.sims_simple.simulate_stepwise_iter(rbnworld, self.content, time, rng)
    events = AChemKit.sims_gillespie.simulate_gillespie_iter(rbnworld, content, time, rng)
    b = AChemKit.bucket.Bucket(events)
    h = hpy()
    print h.heap()
Example #28
        def process_request(self, request):
            from guppy import hpy
        
            self.usage = 0

            self.heapy = hpy()
            self.heapy.setrelheap()
Example #29
 def __init__(self, write_csv=False):
     super(Heap, self).__init__()
     self.heap_urls = self.data['heap_urls'] = {}
     self.hp = hpy()
     self.write_csv = write_csv
     if self.write_csv:
         self.csv_writer = csv.writer(open('heap.csv', 'w'))
Example #30
def do_profiling(X):
    n_nghs = 5
    debug(X.shape)
    lsh.seed()
    out = open('dense_mem.txt', 'w')
    ct_size = 0
    lsh_size = 0
    sizes = [1000, 5000, 10000, 20000, 40000, 60000]
    h = hpy()
    for s in sizes:
        h.setrelheap()
        root = cover_tree.create(X[:s])
        ct_size = h.heap().indisize
        
        debug('querying cover tree')
        nghs = cover_tree.knn(n_nghs, X[0], root)
        
        # use the distance from cover tree to initialize lsh
        h.setrelheap()
        nn = lsh.start(X[:s], nghs[-1].dist)
        lsh_size = h.heap().indisize
        out.write("%f, %f, %f\n" % (ct_size, lsh_size, s))
        del nghs 
        del root
        del nn
        out.flush()
    out.close()
Example #31
        """

        # update with parameter configuration
        for (path, _, _), val in zip(self.pars, config):
            code = "sim.%s = val" % (path, )
            exec code in {'sim': self.sim, 'val': val}

        self.sim.configure()
        return self.sim


if __name__ == '__main__':

    from guppy import hpy
    __h__ = hpy()

    from tvb.simulator.backend import driver_conf
    driver_conf.using_gpu = using_gpu = 1

    from tvb.simulator.lab import *

    # this is test driven development speaking, how can i help you

    model = models.Generic2dOscillator()
    conn = connectivity.Connectivity()
    conn.speed = array([4.0])
    coupling = coupling.Linear(a=0.0152)

    hiss = noise.Additive(nsig=ones((2, )) * 2**-10)
    heun = integrators.EulerStochastic(dt=2**-4, noise=hiss)
Example #32
try:
    from twisted.conch import manhole, telnet
    from twisted.conch.insults import insults
    TWISTED_CONCH_AVAILABLE = True
except ImportError:
    TWISTED_CONCH_AVAILABLE = False

from scrapy.exceptions import NotConfigured
from scrapy import log, signals
from scrapy.utils.trackref import print_live_refs
from scrapy.utils.engine import print_engine_status
from scrapy.utils.reactor import listen_tcp

try:
    import guppy
    hpy = guppy.hpy()
except ImportError:
    hpy = None

# signal to update telnet variables
# args: telnet_vars
update_telnet_vars = object()


class TelnetConsole(protocol.ServerFactory):

    def __init__(self, crawler):
        if not crawler.settings.getbool('TELNETCONSOLE_ENABLED'):
            raise NotConfigured
        if not TWISTED_CONCH_AVAILABLE:
            raise NotConfigured
Пример #33
0
 def GET(self):
     import guppy
     h = guppy.hpy()
     return delegate.RawText(str(h.heap()))
Example #34
def cmd_guppy():
    from guppy import hpy
    global hp
    hp = hpy()
Example #35
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 05 13:34:00 2018

@author: 李弘一萌
"""
import pandas as pd
from WindPy import w
from guppy import hpy; hp = hpy()
from Performance import Performance_Main
from Signal_Index_Generation import Signal_Index_Generation_Main
from Signal_Generation import Signal_Generation_Main
from Backtest import OI_Strat_Bktest
w.start() 



 

###############################################################################
# Set backtest parameters

# An ITS value greater than lambda is treated as a buy signal
para_lambda_optimize = True
para_lambda_best = False
if para_lambda_optimize == True:
    para_lambda_list = [x/100.0 for x in range(-100,100)]
else:
    if para_lambda_best == False:
        para_lambda_list = [0]
    else:
Example #36
 def showHeapy():
     from guppy import hpy
     h = hpy()
     print(h.heap())
Example #37
from guppy import hpy

hp = hpy()
hp.setrelheap()
# do stuff
print(hp.heap())
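Concrete usage of the setrelheap() pattern above: everything allocated before the call is excluded from later snapshots, so heap() shows only what the measured block created. A small sketch:

from guppy import hpy

hp = hpy()
hp.setrelheap()                              # baseline: ignore everything allocated so far
suspects = [object() for _ in range(10000)]
print(hp.heap())                             # roughly just the new objects and their list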
Example #38
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
"""Startup script for running CJC directly from the "source" tree."""

#try:
#   import psyco
#   psyco.profile()
#except ImportError:
#   pass

import sys

if len(sys.argv) > 1 and sys.argv[1] == "--memory-profile":
    # as soon as possible
    from guppy import hpy
    heapy = hpy()
    sys.argv[1:] = sys.argv[2:]
else:
    heapy = None

import os
import glob

base_dir = sys.path[0]

l = glob.glob(os.path.join(base_dir, "../pyxmpp*"))
for p in l:
    if os.path.exists(os.path.join(p, "pyxmpp/__init__.py")):
        print >> sys.stderr, "PyXMPP sources found in:", p
        l = glob.glob(os.path.join(p, "build/lib*"))
        if l:
Example #39
    def run(self):
        logger.info("Start OIA daemon")

        while True:

            if log_memory_usage:
                try:
                    logger.info("GC count: %s" % str(gc.get_count()))
                    h = hpy()
                    logger.debug(str(h.heap()))
                except:
                    logger.error(traceback.format_exc())

            [CurExp, CurFlow] = self.getCurrentRunInformation()

            if SystemType == "raptor" and CurExp != "":
                time.sleep(10)
                continue

            all_run_dirs = self.get_run_dirs()
            logger.debug('RUNS DIRECTORIES: %s' % all_run_dirs)

            # update flow status
            logger.debug('runs in p: %s' % self.runs_in_process)
            for run in self.runs_in_process:
                # check for last flow
                logger.debug('runs in p: %s' % run.dat_path)
                run.last_flow = run.exp_flows - 1
                if run.name == CurExp:
                    run.last_flow = CurFlow
                    logger.debug('flows = %d' % run.last_flow)

            # check for deleted runs
            # TODO

            # check for aborted runs
            for run in self.runs_in_process:
                if run.aborted():
                    logger.info('run aborted %s' % run.name)
                    run.killAnalysis()
                    for block in run.blocks:
                        if block in self.blocks_to_process:
                            self.blocks_to_process.remove(block)
                    self.runs_in_process.remove(run)
                    self.runs_processed.append(run)

            # check for finished runs
            #TODO, add timeout per run?
            logger.debug('check for finished runs')
            for run in self.runs_in_process:
                # check whether all blocks have finished
                nb_blocks_finished = 0
                for block in run.blocks:
                    if block.status == 'done':
                        nb_blocks_finished += 1

                logger.info("Run %s: %s blocks ready" %
                            (run.name, nb_blocks_finished))
                if nb_blocks_finished == len(run.blocks):

                    # update status
                    run.update_status_file()

                    # write timings
                    timing_file = os.path.join(run.sigproc_results_path,
                                               'timing.txt')
                    try:
                        with open(timing_file, 'a') as f:
                            f.write(run.gettiming())
                    except:
                        logger.error(traceback.format_exc())

                    #transfer timing.txt
                    try:
                        directory_to_transfer = "onboard_results/sigproc_results"
                        file_to_transfer = "timing.txt"
                        ret = Transfer(run.name, directory_to_transfer,
                                       file_to_transfer)
                        if ret == 0:
                            logger.debug("Transfer registered %s %s %s" %
                                         (run.name, directory_to_transfer,
                                          file_to_transfer))
                        else:
                            logger.error(
                                "Transfer failed %s %s %s, is datacollect running?"
                                % (run.name, directory_to_transfer,
                                   file_to_transfer))
                    except:
                        logger.error(traceback.format_exc())

                    self.runs_in_process.remove(run)
                    self.runs_processed.append(run)
                else:
                    logger.debug(run.gettiming())

            processed_run_dirs = [run.name for run in self.runs_processed]
            logger.debug('RUNS PROCESSED: %s' % processed_run_dirs)

            in_process_run_dirs = [run.name for run in self.runs_in_process]
            logger.debug('RUNS IN PROCESS: %s' % in_process_run_dirs)

            new_run_dirs = list(
                set(all_run_dirs) - set(processed_run_dirs) -
                set(in_process_run_dirs))
            logger.debug('RUNS NEW: %s' % new_run_dirs)

            # update /software/config/OIAStatus
            self.update_oiastatus_file()

            for run in self.runs_in_process:
                run.update_status_file()

            # add new runs (blocks)
            for run_dir in new_run_dirs:
                logger.info('NEW RUN DETECTED: %s' % run_dir)

                try:
                    arun = Run(run_dir, config)
                    logger.info(arun)
                except:
                    logger.error(traceback.format_exc())
                    continue

                if CurExp and not arun.exp_oia_during_run:
                    continue

                if CurExp == arun.name and CurFlow < 1:
                    continue

                if arun.exp_flows < self.flowblocks:
                    logger.info('skip run: %s, not enough flows' % arun.name)
                    self.runs_processed.append(arun)
                    continue

                if arun.aborted():
                    logger.info('skip aborted run: %s' % arun.name)
                    self.runs_processed.append(arun)
                    continue

                self.runs_in_process.append(arun)

                logger.info('arun.exp_oninstranalysis: %s' %
                            arun.exp_oninstranalysis)
                if not arun.exp_oninstranalysis:
                    continue

                # ignore autoanalyze option in explog.txt
                logger.info('autoanalyze: %s' % arun.explogdict['autoanalyze'])
                #if not arun.explogdict['autoanalyze']: # contains True,False instead of yes, no
                #    continue

                logger.info("ADD %s blocks" % arun.name)
                for block in arun.blocks:
                    if block.status != 'done':
                        self.blocks_to_process.append(block)

            self.printStatus()

            # process runs
            timestamp = time.time()
            logger.info('time since last run check %s' % timestamp)
            while self.blocks_to_process:
                # wait a while before checking if queue is empty
                time.sleep(3)

                logger.info(
                    'Status:        Blocks: {0:3d}  Beadfind: {1:2d}/{2:2d}  Analysis: {3:2d}/{4:2d}  Total: {5:2d}/{6:2d}'
                    .format(
                        len(self.blocks_to_process),
                        self.pool.beadfind_counter, self.nb_max_beadfind_jobs,
                        self.pool.analysis_counter, self.nb_max_analysis_jobs,
                        self.pool.beadfind_counter +
                        self.pool.analysis_counter, self.nb_max_jobs))

                predicted_total_HOST_memory = 0
                predicted_total_GPU_memory = 0

                # get list of all different Runs
                for run in self.runs_in_process:
                    blocks_per_run = [
                        i for i in self.blocks_to_process if i.run == run
                    ]
                    bf = len([
                        i for i in self.blocks_to_process
                        if i.run == run and i.status == 'performJustBeadFind'
                    ])
                    an = len([
                        i for i in self.blocks_to_process
                        if i.run == run and i.status == 'performAnalysis'
                    ])
                    try:
                        nb_max_beadfind_jobs = config.getint(
                            run.exp_chipversion, 'nb_max_beadfind_jobs')
                        nb_max_analysis_jobs = config.getint(
                            run.exp_chipversion, 'nb_max_analysis_jobs')
                    except:
                        nb_max_beadfind_jobs = config.getint(
                            'DefaultChip', 'nb_max_beadfind_jobs')
                        nb_max_analysis_jobs = config.getint(
                            'DefaultChip', 'nb_max_analysis_jobs')
                    if len(blocks_per_run):
                        logger.info(
                            'Chip: {0:8} Blocks: {1:3d}  Beadfind: {2:2d}/{3:2d}  Analysis: {4:2d}/{5:2d}  ({6})'
                            .format(run.exp_chipversion, len(blocks_per_run),
                                    bf, nb_max_beadfind_jobs, an,
                                    nb_max_analysis_jobs, run.name))
                    try:
                        HOST_memory_requirement_beadfind = config.getint(
                            run.exp_chipversion,
                            'HOST_memory_requirement_beadfind')
                        HOST_memory_requirement_analysis = config.getint(
                            run.exp_chipversion,
                            'HOST_memory_requirement_analysis')
                        GPU_memory_requirement_analysis = config.getint(
                            run.exp_chipversion,
                            'GPU_memory_requirement_analysis')
                    except:
                        HOST_memory_requirement_beadfind = config.getint(
                            'DefaultChip', 'HOST_memory_requirement_beadfind')
                        HOST_memory_requirement_analysis = config.getint(
                            'DefaultChip', 'HOST_memory_requirement_analysis')
                        GPU_memory_requirement_analysis = config.getint(
                            'DefaultChip', 'GPU_memory_requirement_analysis')
                    predicted_total_HOST_memory += int(
                        HOST_memory_requirement_beadfind) * bf
                    predicted_total_HOST_memory += int(
                        HOST_memory_requirement_analysis) * an
                    predicted_total_GPU_memory += int(
                        GPU_memory_requirement_analysis) * an

                # every 60 sec
                if time.time() - timestamp > 60:
                    logger.info('HOST: {0} G   GPU: {1} G'.format(
                        predicted_total_HOST_memory / 1073741824,
                        predicted_total_GPU_memory / 1073741824))

                # TODO: run.exp_oia_during_run
                # TODO: check for new runs only if no data acquisition
                # [CurExp,CurFlow] = self.getCurrentRunInformation()
                # every 60 sec check for new run
                if time.time() - timestamp > 60:
                    logger.debug('check for new run')
                    break

                # check status of blocks
                for block in self.blocks_to_process:

                    # check for processed blocks
                    if block.status == 'processed':
                        block.status = "idle"
                        if block.ret == 0:
                            if block.flow_end == -1:
                                block.beadfind_done = True
                            else:
                                block.successful_processed = block.flow_end + 1
                            if block.successful_processed == block.flows_total:
                                block.status = 'sigproc_done'
                        else:
                            logger.error(
                                'Block %s failed with return code %s' %
                                (block.name, block.ret))
                            block.nb_attempts += 1
                            block.sigproc_results_path_tmp = block.sigproc_results_path + "." + str(
                                block.nb_attempts)
                            block.beadfind_done = False
                            block.successful_processed = 0
                            block.flow_start = -1
                            block.flow_end = -1
                            block.status = "idle"

                    # processed blocks
                    if block.status == 'sigproc_done' or block.status == 'sigproc_failed':

                        # 1. rename block / last sigproc attempt
                        try:
                            if not os.path.exists(block.sigproc_results_path):
                                logger.info('rename block %s %s %s' %
                                            (block.name,
                                             block.sigproc_results_path_tmp,
                                             block.sigproc_results_path))
                                if block.nb_attempts >= config.getint(
                                        'global', 'nb_retries'):
                                    shutil.move(
                                        block.sigproc_results_path + "." +
                                        str(block.nb_attempts - 1),
                                        block.sigproc_results_path)
                                else:
                                    shutil.move(block.sigproc_results_path_tmp,
                                                block.sigproc_results_path)
                        except:
                            logger.error('renaming failed %s' % block.name)
                            logger.error(traceback.format_exc())
                            pass

                        # 2. remove *.step files
                        if block.status == 'sigproc_done':
                            try:
                                for filename in os.listdir(
                                        block.sigproc_results_path):
                                    if fnmatch.fnmatch(filename, 'step.*'):
                                        full_path_to_file = os.path.join(
                                            block.sigproc_results_path,
                                            filename)
                                        logger.info('remove step file: %s' %
                                                    full_path_to_file)
                                        os.remove(full_path_to_file)
                            except:
                                logger.error('removing step file failed %s' %
                                             block.name)
                                logger.error(traceback.format_exc())
                                pass
                        '''
                        # 3. generate MD5SUM for each output file
                        try:
                            md5sums = {}
                            for filename in os.listdir(block.sigproc_results_path):
                                full_filename = os.path.join(block.sigproc_results_path,filename)
                                if os.path.isdir(full_filename):
                                    continue
                                with open(full_filename,'rb') as f:
                                    binary_content = f.read()
                                    md5sums[filename] = hashlib.md5(binary_content).hexdigest()
                            with open(os.path.join(block.sigproc_results_path,'MD5SUMS'), 'w') as f:
                                for filename,hexdigest in md5sums.items():
                                    f.write("%s  %s\n" % (hexdigest,filename))
                        except:
                            logger.error(traceback.format_exc())
                        '''

                        block.status = 'ready_to_transfer'

                    if block.status == 'ready_to_transfer':
                        directory_to_transfer = "onboard_results/sigproc_results/" + "block_" + block.name
                        file_to_transfer = ""
                        logger.info("Register transfer: %s %s %s" %
                                    (block.run_name, directory_to_transfer,
                                     file_to_transfer))
                        ret = Transfer(block.run_name, directory_to_transfer,
                                       file_to_transfer)
                        if ret == 0:
                            block.status = "transferred"
                            try:
                                open(
                                    os.path.join(block.sigproc_results_path,
                                                 'transfer_requested.txt'),
                                    'w').close()
                            except:
                                logger.error(traceback.format_exc())
                                pass
                        else:
                            logger.error(
                                "Transfer failed %s %s %s, is datacollect running?"
                                % (block.run_name, directory_to_transfer,
                                   file_to_transfer))

                    if block.status == 'transferred':
                        logger.debug("DONE: %s" % (block.name))
                        block.status = 'done'
                        self.blocks_to_process.remove(block)

                try:
                    ablock = self.get_next_available_job(config)
                except:
                    ablock = None
                    logger.error(traceback.format_exc())

                if ablock:
                    if ablock.nb_attempts >= config.getint(
                            'global', 'nb_retries'):
                        ablock.status = 'sigproc_failed'
                    else:
                        ablock.status = 'queued'
                        ablock.info = '%s-%s' % (ablock.flow_start,
                                                 ablock.flow_end)
                        logger.debug(
                            '%s submitted (%s-%s)' %
                            (ablock.name, ablock.flow_start, ablock.flow_end))
                        self.pool.add_task(ablock)

            #wait 10 sec if no blocks are available
            time.sleep(10)
Example #40
#your code goes here

"""
Your program is to use the brute-force approach in order to find the Answer to Life, the Universe, and Everything. More precisely... rewrite small numbers from input to output. Stop processing input after reading in the number 42. All numbers at input are integers of one or two digits.
"""

from guppy import hpy


def solution(l):
    for n in l:
        if (n == 42):
            break
        print n

if __name__ == "__main__":
    h = hpy()
    h.setref()
    solution([x for x in xrange(1,100)])
    print h.heap()
Example #41
 def __init__(self, dataDir):
     dataRepository = datarepo.FileSystemDataRepository(dataDir)
     super(HeapProfilerBackend, self).__init__(dataRepository)
     self.profiler = guppy.hpy()
Example #42
 def __init__(self):
   super(memoryGetter, self).__init__()
   self.memoryData=[]
   self.endFlag=0
   self.h=hpy()
Example #43
 def memory(self, *args):
     h = hpy()
     self.out(h.heap())
Example #44
def ccache():
    if is_gae:
        form = FORM(
            P(
                TAG.BUTTON(T("Clear CACHE?"),
                           _type="submit",
                           _name="yes",
                           _value="yes")))
    else:
        cache.ram.initialize()
        cache.disk.initialize()

        form = FORM(
            P(
                TAG.BUTTON(T("Clear CACHE?"),
                           _type="submit",
                           _name="yes",
                           _value="yes")),
            P(
                TAG.BUTTON(T("Clear RAM"),
                           _type="submit",
                           _name="ram",
                           _value="ram")),
            P(
                TAG.BUTTON(T("Clear DISK"),
                           _type="submit",
                           _name="disk",
                           _value="disk")),
        )

    if form.accepts(request.vars, session):
        session.flash = ""
        if is_gae:
            if request.vars.yes:
                cache.ram.clear()
                session.flash += T("Cache Cleared")
        else:
            clear_ram = False
            clear_disk = False
            if request.vars.yes:
                clear_ram = clear_disk = True
            if request.vars.ram:
                clear_ram = True
            if request.vars.disk:
                clear_disk = True
            if clear_ram:
                cache.ram.clear()
                session.flash += T("Ram Cleared")
            if clear_disk:
                cache.disk.clear()
                session.flash += T("Disk Cleared")
        redirect(URL(r=request))

    try:
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False

    import shelve
    import os
    import copy
    import time
    import math
    from gluon import portalocker

    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }

    disk = copy.copy(ram)
    total = copy.copy(ram)
    disk['keys'] = []
    total['keys'] = []

    def GetInHMS(seconds):
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)

        return (hours, minutes, seconds)

    if is_gae:
        gae_stats = cache.ram.client.get_stats()
        try:
            gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
                                  (gae_stats['hits'] + gae_stats['misses']))
        except ZeroDivisionError:
            gae_stats['ratio'] = T("?")
        gae_stats['oldest'] = GetInHMS(time.time() -
                                       gae_stats['oldest_item_age'])
        total.update(gae_stats)
    else:
        # get ram stats directly from the cache object
        ram_stats = cache.ram.stats[request.application]
        ram['hits'] = ram_stats['hit_total'] - ram_stats['misses']
        ram['misses'] = ram_stats['misses']
        try:
            ram['ratio'] = ram['hits'] * 100 / ram_stats['hit_total']
        except (KeyError, ZeroDivisionError):
            ram['ratio'] = 0

        for key, value in cache.ram.storage.iteritems():
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            ram['entries'] += 1
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
            ram['keys'].append((key, GetInHMS(time.time() - value[0])))

        for key in cache.disk.storage:
            value = cache.disk.storage[key]
            if isinstance(value[1], dict):
                disk['hits'] = value[1]['hit_total'] - value[1]['misses']
                disk['misses'] = value[1]['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value[1]['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                disk['entries'] += 1
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
                disk['keys'].append((key, GetInHMS(time.time() - value[0])))

        ram_keys = ram.keys()  # ['hits', 'objects', 'ratio', 'entries', 'keys', 'oldest', 'bytes', 'misses']
        ram_keys.remove('ratio')
        ram_keys.remove('oldest')
        for key in ram_keys:
            total[key] = ram[key] + disk[key]

        try:
            total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                    total['misses'])
        except (KeyError, ZeroDivisionError):
            total['ratio'] = 0

        if disk['oldest'] < ram['oldest']:
            total['oldest'] = disk['oldest']
        else:
            total['oldest'] = ram['oldest']

        ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
        disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
        total['oldest'] = GetInHMS(time.time() - total['oldest'])

    def key_table(keys):
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))

    if not is_gae:
        ram['keys'] = key_table(ram['keys'])
        disk['keys'] = key_table(disk['keys'])
        total['keys'] = key_table(total['keys'])

    return dict(form=form,
                total=total,
                ram=ram,
                disk=disk,
                object_stats=hp != False)
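The hp.iso(...) calls above measure individual cached values rather than the whole heap: iso() builds an identity set containing exactly the objects passed to it. A minimal sketch, assuming guppy is installed:

from guppy import hpy

hp = hpy()
x = list(range(1000))
s = hp.iso(x)       # identity set holding exactly the list object
print(s.size)       # bytes used by the list itself (not its elements)
print(s.referents)  # everything the list refers to, as another set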
Example #45
    def actionStats(self):
        import gc
        import sys
        from Ui import UiRequest
        from Crypt import CryptConnection
        import main


        hpy = None
        if self.get.get("size") == "1":  # Calc obj size
            try:
                import guppy
                hpy = guppy.hpy()
            except:
                pass
        self.sendHeader()

        if "Multiuser" in PluginManager.plugin_manager.plugin_names and not config.multiuser_local:
            yield "This function is disabled on this proxy"
            return

        s = time.time()

        # Style
        yield """
        <style>
         * { font-family: monospace }
         table td, table th { text-align: right; padding: 0px 10px }
         .connections td { white-space: nowrap }
         .serving-False { opacity: 0.3 }
        </style>
        """

        # Memory
        yield "rev%s | " % config.rev
        yield "%s | " % main.file_server.ip_external_list
        yield "Port: %s | " % main.file_server.port
        yield "IP Network: %s | " % main.file_server.supported_ip_types
        yield "Opened: %s | " % main.file_server.port_opened
        yield "Crypt: %s, TLSv1.3: %s | " % (CryptConnection.manager.crypt_supported, CryptConnection.ssl.HAS_TLSv1_3)
        yield "In: %.2fMB, Out: %.2fMB  | " % (
            float(main.file_server.bytes_recv) / 1024 / 1024,
            float(main.file_server.bytes_sent) / 1024 / 1024
        )
        yield "Peerid: %s  | " % main.file_server.peer_id
        yield "Time correction: %.2fs" % main.file_server.getTimecorrection()

        try:
            import psutil
            process = psutil.Process(os.getpid())
            mem = process.get_memory_info()[0] / float(2 ** 20)
            yield "Mem: %.2fMB | " % mem
            yield "Threads: %s | " % len(process.threads())
            yield "CPU: usr %.2fs sys %.2fs | " % process.cpu_times()
            yield "Files: %s | " % len(process.open_files())
            yield "Sockets: %s | " % len(process.connections())
            yield "Calc size <a href='?size=1'>on</a> <a href='?size=0'>off</a>"
        except Exception:
            pass
        yield "<br>"

        # Connections
        yield "<b>Connections</b> (%s, total made: %s, in: %s, out: %s):<br>" % (
            len(main.file_server.connections), main.file_server.last_connection_id, main.file_server.num_incoming, main.file_server.num_outgoing
        )
        yield "<table class='connections'><tr> <th>id</th> <th>type</th> <th>ip</th> <th>open</th> <th>crypt</th> <th>ping</th>"
        yield "<th>buff</th> <th>bad</th> <th>idle</th> <th>open</th> <th>delay</th> <th>cpu</th> <th>out</th> <th>in</th> <th>last sent</th>"
        yield "<th>wait</th> <th>version</th> <th>time</th> <th>sites</th> </tr>"
        for connection in main.file_server.connections:
            if "cipher" in dir(connection.sock):
                cipher = connection.sock.cipher()[0]
                tls_version = connection.sock.version()
            else:
                cipher = connection.crypt
                tls_version = ""
            if "time" in connection.handshake and connection.last_ping_delay:
                time_correction = connection.handshake["time"] - connection.handshake_time - connection.last_ping_delay
            else:
                time_correction = 0.0
            yield self.formatTableRow([
                ("%3d", connection.id),
                ("%s", connection.type),
                ("%s:%s", (connection.ip, connection.port)),
                ("%s", connection.handshake.get("port_opened")),
                ("<span title='%s %s'>%s</span>", (cipher, tls_version, connection.crypt)),
                ("%6.3f", connection.last_ping_delay),
                ("%s", connection.incomplete_buff_recv),
                ("%s", connection.bad_actions),
                ("since", max(connection.last_send_time, connection.last_recv_time)),
                ("since", connection.start_time),
                ("%.3f", max(-1, connection.last_sent_time - connection.last_send_time)),
                ("%.3f", connection.cpu_time),
                ("%.0fk", connection.bytes_sent / 1024),
                ("%.0fk", connection.bytes_recv / 1024),
                ("<span title='Recv: %s'>%s</span>", (connection.last_cmd_recv, connection.last_cmd_sent)),
                ("%s", list(connection.waiting_requests.keys())),
                ("%s r%s", (connection.handshake.get("version"), connection.handshake.get("rev", "?"))),
                ("%.2fs", time_correction),
                ("%s", connection.sites)
            ])
        yield "</table>"

        # Trackers
        yield "<br><br><b>Trackers:</b><br>"
        yield "<table class='trackers'><tr> <th>address</th> <th>request</th> <th>successive errors</th> <th>last_request</th></tr>"
        from Site import SiteAnnouncer # importing at the top of the file breaks plugins
        for tracker_address, tracker_stat in sorted(SiteAnnouncer.global_stats.items()):
            yield self.formatTableRow([
                ("%s", tracker_address),
                ("%s", tracker_stat["num_request"]),
                ("%s", tracker_stat["num_error"]),
                ("%.0f min ago", min(999, (time.time() - tracker_stat["time_request"]) / 60))
            ])
        yield "</table>"

        if "AnnounceShare" in PluginManager.plugin_manager.plugin_names:
            yield "<br><br><b>Shared trackers:</b><br>"
            yield "<table class='trackers'><tr> <th>address</th> <th>added</th> <th>found</th> <th>latency</th> <th>successive errors</th> <th>last_success</th></tr>"
            from AnnounceShare import AnnounceSharePlugin
            for tracker_address, tracker_stat in sorted(AnnounceSharePlugin.tracker_storage.getTrackers().items()):
                yield self.formatTableRow([
                    ("%s", tracker_address),
                    ("%.0f min ago", min(999, (time.time() - tracker_stat["time_added"]) / 60)),
                    ("%.0f min ago", min(999, (time.time() - tracker_stat.get("time_found", 0)) / 60)),
                    ("%.3fs", tracker_stat["latency"]),
                    ("%s", tracker_stat["num_error"]),
                    ("%.0f min ago", min(999, (time.time() - tracker_stat["time_success"]) / 60)),
                ])
            yield "</table>"

        # Tor hidden services
        yield "<br><br><b>Tor hidden services (status: %s):</b><br>" % main.file_server.tor_manager.status
        for site_address, onion in list(main.file_server.tor_manager.site_onions.items()):
            yield "- %-34s: %s<br>" % (site_address, onion)

        # Db
        yield "<br><br><b>Db</b>:<br>"
        for db in Db.opened_dbs:
            tables = [row["name"] for row in db.execute("SELECT name FROM sqlite_master WHERE type = 'table'").fetchall()]
            table_rows = {}
            for table in tables:
                table_rows[table] = db.execute("SELECT COUNT(*) AS c FROM %s" % table).fetchone()["c"]
            db_size = os.path.getsize(db.db_path) / 1024.0 / 1024.0
            yield "- %.3fs: %s %.3fMB, table rows: %s<br>" % (
                time.time() - db.last_query_time, db.db_path, db_size, json.dumps(table_rows, sort_keys=True)
            )


        # Sites
        yield "<br><br><b>Sites</b>:"
        yield "<table>"
        yield "<tr><th>address</th> <th>connected</th> <th title='connected/good/total'>peers</th> <th>content.json</th> <th>out</th> <th>in</th>  </tr>"
        for site in list(self.server.sites.values()):
            yield self.formatTableRow([
                (
                    """<a href='#' onclick='document.getElementById("peers_%s").style.display="initial"; return false'>%s</a>""",
                    (site.address, site.address)
                ),
                ("%s", [peer.connection.id for peer in list(site.peers.values()) if peer.connection and peer.connection.connected]),
                ("%s/%s/%s", (
                    len([peer for peer in list(site.peers.values()) if peer.connection and peer.connection.connected]),
                    len(site.getConnectablePeers(100)),
                    len(site.peers)
                )),
                ("%s (loaded: %s)", (
                    len(site.content_manager.contents),
                    len([key for key, val in dict(site.content_manager.contents).items() if val])
                )),
                ("%.0fk", site.settings.get("bytes_sent", 0) / 1024),
                ("%.0fk", site.settings.get("bytes_recv", 0) / 1024),
            ], "serving-%s" % site.settings["serving"])
            yield "<tr><td id='peers_%s' style='display: none; white-space: pre' colspan=6>" % site.address
            for key, peer in list(site.peers.items()):
                if peer.time_found:
                    time_found = int((time.time() - peer.time_found) / 60)  # whole minutes
                else:
                    time_found = "--"
                if peer.connection:
                    connection_id = peer.connection.id
                else:
                    connection_id = None
                if site.content_manager.has_optional_files:
                    yield "Optional files: %4s " % len(peer.hashfield)
                time_added = (time.time() - peer.time_added) / (60 * 60 * 24)
                yield "(#%4s, rep: %2s, err: %s, found: %3s min, add: %.1f day) %30s -<br>" % (connection_id, peer.reputation, peer.connection_error, time_found, time_added, key)
            yield "<br></td></tr>"
        yield "</table>"

        # Big files
        yield "<br><br><b>Big files</b>:<br>"
        for site in list(self.server.sites.values()):
            if not site.settings.get("has_bigfile"):
                continue
            bigfiles = {}
            yield """<a href="#" onclick='document.getElementById("bigfiles_%s").style.display="initial"; return false'>%s</a><br>""" % (site.address, site.address)
            for peer in list(site.peers.values()):
                if not peer.time_piecefields_updated:
                    continue
                for sha512, piecefield in peer.piecefields.items():
                    if sha512 not in bigfiles:
                        bigfiles[sha512] = []
                    bigfiles[sha512].append(peer)

            yield "<div id='bigfiles_%s' style='display: none'>" % site.address
            for sha512, peers in bigfiles.items():
                yield "<br> - " + sha512 + " (hash id: %s)<br>" % site.content_manager.hashfield.getHashId(sha512)
                yield "<table>"
                for peer in peers:
                    yield "<tr><td>" + peer.key + "</td><td>" + peer.piecefields[sha512].tostring() + "</td></tr>"
                yield "</table>"
            yield "</div>"

        # Cmd stats
        yield "<div style='float: left'>"
        yield "<br><br><b>Sent commands</b>:<br>"
        yield "<table>"
        for stat_key, stat in sorted(main.file_server.stat_sent.items(), key=lambda i: i[1]["bytes"], reverse=True):
            yield "<tr><td>%s</td><td style='white-space: nowrap'>x %s =</td><td>%.0fkB</td></tr>" % (stat_key, stat["num"], stat["bytes"] / 1024)
        yield "</table>"
        yield "</div>"

        yield "<div style='float: left; margin-left: 20%; max-width: 50%'>"
        yield "<br><br><b>Received commands</b>:<br>"
        yield "<table>"
        for stat_key, stat in sorted(main.file_server.stat_recv.items(), key=lambda i: i[1]["bytes"], reverse=True):
            yield "<tr><td>%s</td><td style='white-space: nowrap'>x %s =</td><td>%.0fkB</td></tr>" % (stat_key, stat["num"], stat["bytes"] / 1024)
        yield "</table>"
        yield "</div>"
        yield "<div style='clear: both'></div>"

        # No more if not in debug mode
        if not config.debug:
            return

        # Object types

        obj_count = {}
        for obj in gc.get_objects():
            obj_type = str(type(obj))
            if obj_type not in obj_count:
                obj_count[obj_type] = [0, 0]
            obj_count[obj_type][0] += 1  # Count
            obj_count[obj_type][1] += float(sys.getsizeof(obj)) / 1024  # Size

        yield "<br><br><b>Objects in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
            len(obj_count),
            sum([stat[0] for stat in list(obj_count.values())]),
            sum([stat[1] for stat in list(obj_count.values())])
        )

        for obj, stat in sorted(list(obj_count.items()), key=lambda x: x[1][0], reverse=True):  # Sorted by count
            yield " - %.1fkb = %s x <a href=\"/Listobj?type=%s\">%s</a><br>" % (stat[1], stat[0], obj, html.escape(obj))

        # Classes

        class_count = {}
        for obj in gc.get_objects():
            obj_type = str(type(obj))
            if obj_type != "<type 'instance'>":  # matches only Python 2 old-style instances
                continue
            class_name = obj.__class__.__name__
            if class_name not in class_count:
                class_count[class_name] = [0, 0]
            class_count[class_name][0] += 1  # Count
            class_count[class_name][1] += float(sys.getsizeof(obj)) / 1024  # Size

        yield "<br><br><b>Classes in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
            len(class_count),
            sum([stat[0] for stat in list(class_count.values())]),
            sum([stat[1] for stat in list(class_count.values())])
        )

        for obj, stat in sorted(list(class_count.items()), key=lambda x: x[1][0], reverse=True):  # Sorted by count
            yield " - %.1fkb = %s x <a href=\"/Dumpobj?class=%s\">%s</a><br>" % (stat[1], stat[0], obj, html.escape(obj))

        from greenlet import greenlet
        objs = [obj for obj in gc.get_objects() if isinstance(obj, greenlet)]
        yield "<br>Greenlets (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        from Worker import Worker
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Worker)]
        yield "<br>Workers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        from Connection import Connection
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Connection)]
        yield "<br>Connections (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        from socket import socket
        objs = [obj for obj in gc.get_objects() if isinstance(obj, socket)]
        yield "<br>Sockets (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        from msgpack import Unpacker
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Unpacker)]
        yield "<br>Msgpack unpacker (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        from Site.Site import Site
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Site)]
        yield "<br>Sites (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        objs = [obj for obj in gc.get_objects() if isinstance(obj, self.server.log.__class__)]
        yield "<br>Loggers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj.name)))

        objs = [obj for obj in gc.get_objects() if isinstance(obj, UiRequest)]
        yield "<br>UiRequests (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        from Peer import Peer
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Peer)]
        yield "<br>Peers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), html.escape(repr(obj)))

        objs = [(key, val) for key, val in sys.modules.items() if val is not None]
        objs.sort()
        yield "<br>Modules (%s):<br>" % len(objs)
        for module_name, module in objs:
            yield " - %.3fkb: %s %s<br>" % (self.getObjSize(module, hpy), module_name, html.escape(repr(module)))

        gc.collect()  # Explicit garbage collection
        yield "Done in %.1fs" % (time.time() - s)
Example #46
    return time_taken


times_taken = take_time(created_list)

# plot graph
plt.title("Input Size vs Time taken")
plt.xlabel("Input size")
plt.ylabel("Time taken")
plt.plot(created_list, times_taken)
plt.show()

# ii.Space graph

# creating guppy session context (assumes `from guppy import hpy` earlier in the file)
h = hpy()


# create function that records space used for each input size
def take_space(array):
    space_taken = []

    for i in range(1, len(array) + 1):
        sub_list = array[0:i]
        h.setrelheap()  # measure allocations relative to this point
        Question3.find_maximum(sub_list)
        # The heap report reads "Partition of a set of N objects.
        # Total size = M bytes. ..."; token 10 is the size in bytes.
        raw_string = repr(h.heap()).split()
        space = raw_string[10]
        space_taken.append(space)
    return space_taken
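Parsing the textual heap report is brittle; assuming the same guppy session context, the byte count can be read directly from the heap object instead:

        space_taken.append(h.heap().size)  # total bytes since setrelheap(), no string parsing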
Example #47
def main():
    """Processes command line to preprocess a file or a directory."""
    program_version = "v%s" % __version__
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = """%s
  Created by Paul Ross on %s.
  Copyright 2008-2015. All rights reserved.
  Licensed under GPL 2.0
USAGE
""" % (program_shortdesc, str(__date__))
    parser = argparse.ArgumentParser(
        description=program_license,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "-c",
        action="store_true",
        dest="plot_conditional",
        default=False,
        help=
        "Add conditionally included files to the plots. [default: %(default)s]"
    )
    parser.add_argument("-d",
                        "--dump",
                        action="append",
                        dest="dump",
                        default=[],
                        help="""Dump output, additive. Can be:
C - Conditional compilation graph.
F - File names encountered and their count.
I - Include graph.
M - Macro environment.
T - Token count.
R - Macro dependencies as an input to DOT.
[default: %(default)s]""")
    parser.add_argument(
        "-g",
        "--glob",
        type=str,
        dest="glob",
        default="*.*",
        help=
        "Pattern match to use when processing directories. [default: %(default)s]"
    )
    parser.add_argument("--heap",
                        action="store_true",
                        dest="heap",
                        default=False,
                        help="Profile memory usage. [default: %(default)s]")
    parser.add_argument(
            "-j", "--jobs",
            type=int,
            dest="jobs",
            default=0,
            help="""Max simultaneous processes when pre-processing
directories. Zero uses number of native CPUs [%d].
1 means no multiprocessing."""
                    % multiprocessing.cpu_count() \
                    + " [default: %(default)s]"
        )
    parser.add_argument("-k",
                        "--keep-going",
                        action="store_true",
                        dest="keep_going",
                        default=False,
                        help="Keep going. [default: %(default)s]")
    parser.add_argument(
            "-l", "--loglevel",
            type=int,
            dest="loglevel",
            default=30,
            help="Log Level (debug=10, info=20, warning=30, error=40, critical=50)" \
            " [default: %(default)s]"
        )
    parser.add_argument("-o",
                        "--output",
                        type=str,
                        dest="output",
                        default="out",
                        help="Output directory. [default: %(default)s]")
    parser.add_argument(
        "-p",
        action="store_true",
        dest="ignore_pragma",
        default=False,
        help="Ignore pragma statements. [default: %(default)s]")
    parser.add_argument(
        "-r",
        "--recursive",
        action="store_true",
        dest="recursive",
        default=False,
        help="Recursively process directories. [default: %(default)s]")
    parser.add_argument(
        "-t",
        "--dot",
        action="store_true",
        dest="include_dot",
        default=False,
        help="""Write an DOT include dependency table and execute DOT
on it to create a SVG file. [default: %(default)s]""")
    parser.add_argument(
        "-G",
        action="store_true",
        dest="gcc_extensions",
        default=False,
        help=
        """Support GCC extensions. Currently only #include_next. [default: %(default)s]"""
    )
    parser.add_argument(dest="path", nargs=1, help="Path to source file.")
    Cpp.addStandardArguments(parser)
    args = parser.parse_args()
    #     print(' ARGS '.center(75, '-'))
    #     print(args)
    #     print(' END: ARGS '.center(75, '-'))
    clkStart = time.perf_counter()  # time.clock() was removed in Python 3.8
    # Initialise logging etc.
    inPath = args.path[0]
    if args.jobs != 1 and os.path.isdir(inPath):
        # Multiprocessing
        logFormat = '%(asctime)s %(levelname)-8s [%(process)5d] %(message)s'
    else:
        logFormat = '%(asctime)s %(levelname)-8s %(message)s'
    logging.basicConfig(
        level=args.loglevel,
        format=logFormat,
        # datefmt='%y-%m-%d %H:%M:%S',
        stream=sys.stdout)
    # Memory usage dump
    if args.heap:
        try:
            from guppy import hpy
        except ImportError:
            print('Cannot profile memory as you do not have guppy installed:' \
                  ' http://guppy-pe.sourceforge.net/')
            args.heap = False
    # Start memory profiling if requested
    if args.heap:
        myHeap = hpy()
        myHeap.setrelheap()
    else:
        myHeap = None
    # Create objects to pass to pre-processor
    myIncH = IncludeHandler.CppIncludeStdOs(
        theUsrDirs=args.incUsr or [],
        theSysDirs=args.incSys or [],
    )
    preDefMacros = {}
    if args.predefines:
        for d in args.predefines:
            _tup = d.split('=')
            if len(_tup) == 2:
                preDefMacros[_tup[0]] = _tup[1] + '\n'
            elif len(_tup) == 1:
                preDefMacros[_tup[0]] = '\n'
            else:
                raise ValueError('Cannot read macro definition: %s' % d)
    # Create the job specification
    jobSpec = MainJobSpec(
        incHandler=myIncH,
        preDefMacros=preDefMacros,
        preIncFiles=Cpp.predefinedFileObjects(args),
        diagnostic=CppDiagnostic.PreprocessDiagnosticKeepGoing()
        if args.keep_going else None,
        pragmaHandler=PragmaHandler.PragmaHandlerNull()
        if args.ignore_pragma else None,
        keepGoing=args.keep_going,
        conditionalLevel=2 if args.plot_conditional else 0,
        dumpList=args.dump,
        helpMap=retOptionMap(parser, args),
        includeDOT=args.include_dot,
        cmdLine=' '.join(sys.argv),
        gccExtensions=args.gcc_extensions,
    )
    if os.path.isfile(inPath):
        preprocessFileToOutput(inPath, args.output, jobSpec)
        writeIndexHtml([inPath], args.output, jobSpec)
    elif os.path.isdir(inPath):
        preprocessDirToOutput(
            inPath,
            args.output,
            jobSpec,
            globMatch=args.glob,
            recursive=args.recursive,
            numJobs=args.jobs,
        )
    else:
        logging.fatal('%s is neither a file nor a directory!' % inPath)
        return 1
    if args.heap and myHeap is not None:
        print('Dump of heap:')
        h = myHeap.heap()
        print(h)
        print()
        print('Dump of heap byrcs:')
        print(h.byrcs)
        print()
    clkExec = time.perf_counter() - clkStart
    print('CPU time = %8.3f (S)' % clkExec)
    print('Bye, bye!')
    return 0
Example #48
def cmd_profile(self, conn):
    """Memory profiling for debugging."""
    from guppy import hpy
    heap = hpy().heap()
    print(heap)
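The printed heap is a partition table that can be narrowed further; a short sketch using standard guppy accessors on the same heap value:

    print(heap.byrcs)  # regroup the live objects by referrer category
    print(heap[0])     # drill into the largest row of the partition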
Example #49
# OAuth
OAUTH_AUTHENTICATION = 'liboauth.backend.OAuthBackend' in AUTHENTICATION_BACKENDS
if OAUTH_AUTHENTICATION:
    INSTALLED_APPS.append('liboauth')
    LOGIN_URL = '/oauth/accounts/login'
    SESSION_EXPIRE_AT_BROWSER_CLOSE = True

# URL Redirection white list.
if desktop.conf.REDIRECT_WHITELIST.get():
    MIDDLEWARE_CLASSES.append(
        'desktop.middleware.EnsureSafeRedirectURLMiddleware')

# Support HTTPS load-balancing
if desktop.conf.SECURE_PROXY_SSL_HEADER.get():
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')

############################################################

# Necessary for South to not fuzz with tests.  Fixed in South 0.7.1
SKIP_SOUTH_TESTS = True

# Set up environment variable so Kerberos libraries look at our private
# ticket cache
os.environ['KRB5CCNAME'] = desktop.conf.KERBEROS.CCACHE_PATH.get()

# Memory profiling (assumes `from guppy import hpy` earlier in this settings module)
if desktop.conf.MEMORY_PROFILER.get():
    MEMORY_PROFILER = hpy()
    MEMORY_PROFILER.setrelheap()
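A hedged sketch of how this module-level profiler might be consulted later (the debug-endpoint context is hypothetical; MEMORY_PROFILER and the config flag come from the snippet above):

# e.g. in a debug endpoint: report allocations made since setrelheap()
if desktop.conf.MEMORY_PROFILER.get():
    print(MEMORY_PROFILER.heap())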
Example #50
def compute_depthmap(arguments):
    """Compute depthmap for a single shot."""
    log.setup()

    data, udata, neighbors, min_depth, max_depth, shot = arguments
    method = udata.config['depthmap_method']

    if udata.raw_depthmap_exists(shot.id):
        logger.info("Using precomputed raw depthmap {}".format(shot.id))
        return
    logger.info("Computing depthmap for image {0} with {1}".format(shot.id, method))

    hp = hpy()
    before = hp.heap()

    de = pydense.DepthmapEstimator()
    de.set_depth_range(min_depth, max_depth, 100)
    de.set_patchmatch_iterations(udata.config['depthmap_patchmatch_iterations'])
    de.set_patch_size(udata.config['depthmap_patch_size'])
    de.set_min_patch_sd(udata.config['depthmap_min_patch_sd'])
    add_views_to_depth_estimator(data, udata, neighbors, de)

    if method == 'BRUTE_FORCE':
        depth, plane, score, nghbr = de.compute_brute_force()
    elif method == 'PATCH_MATCH':
        depth, plane, score, nghbr = de.compute_patch_match()
    elif method == 'PATCH_MATCH_SAMPLE':
        depth, plane, score, nghbr = de.compute_patch_match_sample()
    else:
        raise ValueError(
            'Unknown depthmap method type '
            '(must be BRUTE_FORCE, PATCH_MATCH or PATCH_MATCH_SAMPLE)')

    good_score = score > udata.config['depthmap_min_correlation_score']
    depth = depth * (depth < max_depth) * good_score

    # Save and display results
    neighbor_ids = [i.id for i in neighbors[1:]]
    udata.save_raw_depthmap(shot.id, depth, plane, score, nghbr, neighbor_ids)

    depthmap = {
        'depth': depth,
        'plane': plane,
        'score': score,
        'nghbr': nghbr,
        'nghbrs': neighbor_ids,
    }

    #raw_depthmap.update({shot.id:depthmap})
    data.save_raw_depthmap(shot.id, depthmap)
    # after=hp.heap()
    # leftover=after-before
    # objgraph.show_most_common_types()
    # pdb.set_trace()
    # after1=hp.heap()
    # leftover1=after1-before
    # exit()

    # print(data)
    # print("pid == ",os.getpid())
    # print("depthmap== ", hex(id(depthmap)))
    # print("raw_depthmap== ", hex(id(raw_depthmap)))

    if udata.config['depthmap_save_debug_files']:
        image = data.udata_image[shot.id]  # load_undistorted_image(shot.id)
        #image[:, :, :3] = image[:, :, [2, 1, 0]]
        image = scale_down_image(image, depth.shape[1], depth.shape[0])
        ply = depthmap_to_ply(shot, depth, image)
        # raw_ply.update({shot.id:ply})
        # print("ply== ", hex(id(ply)))
        # print("raw_ply== ", hex(id(raw_ply)))
        with io.open_wt(udata._depthmap_file(shot.id, 'raw.npz.ply')) as fout:
            fout.write(ply)
        data.save_raw_ply(shot.id,ply)
        print('available memory== ', memory_available())
        print("current memory usage==", current_memory_usage())
Example #51
                         centre_of_mass):
    # define output file
    out_file_template = '{0}/toy_mc_N_{1}_from_{2}_to_{3}_{4}TeV.root'
    output_file_name = out_file_template.format(output_folder, n_toy, start_at,
                                                start_at + n_input_mc,
                                                centre_of_mass)
    return output_file_name


if __name__ == "__main__":
    if DEBUG:
        sys.argv.append("-h")
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        import cProfile
        import pstats
        from guppy import hpy
        profile_filename = 'dps.analysis.unfolding_tests.create_toy_mc_from_tree_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        h = hpy().heap()
        statsfile.write(str(h))
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
Example #52
def ccache():
    form = FORM(
        P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")),
    )

    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True

        if clear_ram:
            cache.ram.clear()
            session.flash += "Ram Cleared "
        if clear_disk:
            cache.disk.clear()
            session.flash += "Disk Cleared"

        redirect(URL(r=request, c='appadmin'))

    try:
        from guppy import hpy; hp=hpy()
    except ImportError:
        hp = False

    import shelve, os, copy, time, math
    from gluon import portalocker

    ram = {
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time()
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)

    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count

                if value[0] < ram['oldest']:
                    ram['oldest'] = value[0]

    locker = open(os.path.join(request.folder, 'cache/cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(os.path.join(request.folder, 'cache/cache.shelve'))
    try:
        for key, value in disk_storage.items():
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                    if value[0] < disk['oldest']:
                        disk['oldest'] = value[0]
    finally:
        portalocker.unlock(locker)
        locker.close()
        disk_storage.close()

    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0

    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']

    def GetInHMS(seconds):
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)

        return (hours, minutes, seconds)

    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])

    return dict(form=form, total=total,
                ram=ram, disk=disk)
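hp.iso() wraps individual objects (here each cached value) in a heapy identity set so their footprint can be read; a minimal sketch with a placeholder value:

    stats = hp.iso(cached_value)    # cached_value is hypothetical
    print(stats.size, stats.count)  # bytes used and number of objects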
Example #53
https://gist.github.com/earwig/28a64ffb94d51a608e3d
"""

import ctypes
from ctypes import pythonapi as api
import sys
from types import (BuiltinFunctionType, GetSetDescriptorType, FrameType,
                   MemberDescriptorType, MethodType)

try:
    import guppy
    from guppy.heapy import Path
except Exception:
    raise ImportError('Cannot use local decorator without guppy!')

hp = guppy.hpy()


def _w(x):  # NOSONAR
    def f():  # NOSONAR
        x  # NOSONAR -- referencing x here forces it into a closure cell

    return f


if sys.version_info < (3, 0):
    CellType = type(_w(0).func_closure[0])
else:
    CellType = type(_w(0).__closure__[0])

del _w
Example #54
    async def heap(self, ctx):
        if ctx.author.id in config.ADMINS:
            h = hpy()
            val = h.heap()

            print(val)
Example #55
def get_heap_mem_usage(self):
    return hpy().heap().size
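A hedged usage sketch (the enclosing object, here called monitor, and the guppy import are assumed):

before = monitor.get_heap_mem_usage()
do_work()  # placeholder for the code under measurement
print("heap grew by %d bytes" % (monitor.get_heap_mem_usage() - before))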
Example #56
    def actionStats(self):
        import gc
        import sys
        from Ui import UiRequest
        from Db import Db
        from Crypt import CryptConnection

        hpy = None
        if self.get.get("size") == "1":  # Calc obj size
            try:
                import guppy
                hpy = guppy.hpy()
            except Exception:
                pass
        self.sendHeader()

        if "Multiuser" in PluginManager.plugin_manager.plugin_names and not config.multiuser_local:
            yield "This function is disabled on this proxy"
            raise StopIteration

        s = time.time()
        main = sys.modules["main"]

        # Style
        yield """
        <style>
         * { font-family: monospace }
         table td, table th { text-align: right; padding: 0px 10px }
         .connections td { white-space: nowrap }
         .serving-False { opacity: 0.3 }
        </style>
        """

        # Memory
        try:
            yield "rev%s | " % config.rev
            yield "%s | " % config.ip_external
            yield "Opened: %s | " % main.file_server.port_opened
            yield "Crypt: %s | " % CryptConnection.manager.crypt_supported
            yield "In: %.2fMB, Out: %.2fMB  | " % (
                float(main.file_server.bytes_recv) / 1024 / 1024,
                float(main.file_server.bytes_sent) / 1024 / 1024)
            yield "Peerid: %s  | " % main.file_server.peer_id
            import psutil
            process = psutil.Process(os.getpid())
            mem = process.get_memory_info()[0] / float(2**20)
            yield "Mem: %.2fMB | " % mem
            yield "Threads: %s | " % len(process.threads())
            yield "CPU: usr %.2fs sys %.2fs | " % process.cpu_times()
            yield "Files: %s | " % len(process.open_files())
            yield "Sockets: %s | " % len(process.connections())
            yield "Calc size <a href='?size=1'>on</a> <a href='?size=0'>off</a>"
        except Exception:
            pass
        yield "<br>"

        # Connections
        yield "<b>Connections</b> (%s, total made: %s):<br>" % (len(
            main.file_server.connections), main.file_server.last_connection_id)
        yield "<table class='connections'><tr> <th>id</th> <th>type</th> <th>ip</th> <th>open</th> <th>crypt</th> <th>ping</th>"
        yield "<th>buff</th> <th>bad</th> <th>idle</th> <th>open</th> <th>delay</th> <th>cpu</th> <th>out</th> <th>in</th> <th>last sent</th>"
        yield "<th>wait</th> <th>version</th> <th>sites</th> </tr>"
        for connection in main.file_server.connections:
            if "cipher" in dir(connection.sock):
                cipher = connection.sock.cipher()[0]
            else:
                cipher = connection.crypt
            yield self.formatTableRow([
                ("%3d", connection.id), ("%s", connection.type),
                ("%s:%s", (connection.ip, connection.port)),
                ("%s", connection.handshake.get("port_opened")),
                ("<span title='%s'>%s</span>", (connection.crypt, cipher)),
                ("%6.3f", connection.last_ping_delay),
                ("%s", connection.incomplete_buff_recv),
                ("%s", connection.bad_actions),
                ("since",
                 max(connection.last_send_time, connection.last_recv_time)),
                ("since", connection.start_time),
                ("%.3f",
                 connection.last_sent_time - connection.last_send_time),
                ("%.3f", connection.cpu_time),
                ("%.0fkB", connection.bytes_sent / 1024),
                ("%.0fkB", connection.bytes_recv / 1024),
                ("%s", connection.last_cmd),
                ("%s", connection.waiting_requests.keys()),
                ("%s r%s", (connection.handshake.get("version"),
                            connection.handshake.get("rev", "?"))),
                ("%s", connection.sites)
            ])
        yield "</table>"

        # Tor hidden services
        yield "<br><br><b>Tor hidden services (status: %s):</b><br>" % main.file_server.tor_manager.status
        for site_address, onion in main.file_server.tor_manager.site_onions.items(
        ):
            yield "- %-34s: %s<br>" % (site_address, onion)

        # Db
        yield "<br><br><b>Db</b>:<br>"
        for db in sys.modules["Db.Db"].opened_dbs:
            yield "- %.3fs: %s<br>" % (time.time() - db.last_query_time,
                                       db.db_path.encode("utf8"))

        # Sites
        yield "<br><br><b>Sites</b>:"
        yield "<table>"
        yield "<tr><th>address</th> <th>connected</th> <th title='connected/good/total'>peers</th> <th>content.json</th> <th>out</th> <th>in</th>  </tr>"
        for site in sorted(self.server.sites.values(),
                           lambda a, b: cmp(a.address, b.address)):
            yield self.formatTableRow([
                ("""<a href='#' onclick='document.getElementById("peers_%s").style.display="initial"; return false'>%s</a>""",
                 (site.address, site.address)),
                ("%s", [
                    peer.connection.id for peer in site.peers.values()
                    if peer.connection and peer.connection.connected
                ]),
                ("%s/%s/%s", (len([
                    peer for peer in site.peers.values()
                    if peer.connection and peer.connection.connected
                ]), len(site.getConnectablePeers(100)), len(site.peers))),
                ("%s (loaded: %s)",
                 (len(site.content_manager.contents),
                  len([
                      key for key, val in dict(
                          site.content_manager.contents).iteritems() if val
                  ]))),
                ("%.0fkB", site.settings.get("bytes_sent", 0) / 1024),
                ("%.0fkB", site.settings.get("bytes_recv", 0) / 1024),
            ], "serving-%s" % site.settings["serving"])
            yield "<tr><td id='peers_%s' style='display: none; white-space: pre' colspan=6>" % site.address
            for key, peer in site.peers.items():
                if peer.time_found:
                    time_found = int(time.time() - peer.time_found) / 60
                else:
                    time_found = "--"
                if peer.connection:
                    connection_id = peer.connection.id
                else:
                    connection_id = None
                if site.content_manager.hashfield:
                    yield "Optional files: %4s " % len(peer.hashfield)
                time_added = (time.time() - peer.time_added) / (60 * 60 * 24)
                yield "(#%4s, err: %s, found: %3s min, add: %.1f day) %30s -<br>" % (
                    connection_id, peer.connection_error, time_found,
                    time_added, key)
            yield "<br></td></tr>"
        yield "</table>"

        # No more if not in debug mode
        if not config.debug:
            raise StopIteration

        # Object types

        obj_count = {}
        for obj in gc.get_objects():
            obj_type = str(type(obj))
            if obj_type not in obj_count:
                obj_count[obj_type] = [0, 0]
            obj_count[obj_type][0] += 1  # Count
            obj_count[obj_type][1] += float(sys.getsizeof(obj)) / 1024  # Size

        yield "<br><br><b>Objects in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
            len(obj_count), sum([stat[0] for stat in obj_count.values()]),
            sum([stat[1] for stat in obj_count.values()]))

        for obj, stat in sorted(obj_count.items(),
                                key=lambda x: x[1][0],
                                reverse=True):  # Sorted by count
            yield " - %.1fkb = %s x <a href=\"/Listobj?type=%s\">%s</a><br>" % (
                stat[1], stat[0], obj, cgi.escape(obj))

        # Classes

        class_count = {}
        for obj in gc.get_objects():
            obj_type = str(type(obj))
            if obj_type != "<type 'instance'>":
                continue
            class_name = obj.__class__.__name__
            if class_name not in class_count:
                class_count[class_name] = [0, 0]
            class_count[class_name][0] += 1  # Count
            class_count[class_name][1] += float(
                sys.getsizeof(obj)) / 1024  # Size

        yield "<br><br><b>Classes in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
            len(class_count), sum([stat[0] for stat in class_count.values()]),
            sum([stat[1] for stat in class_count.values()]))

        for obj, stat in sorted(class_count.items(),
                                key=lambda x: x[1][0],
                                reverse=True):  # Sorted by count
            yield " - %.1fkb = %s x <a href=\"/Dumpobj?class=%s\">%s</a><br>" % (
                stat[1], stat[0], obj, cgi.escape(obj))

        from greenlet import greenlet
        objs = [obj for obj in gc.get_objects() if isinstance(obj, greenlet)]
        yield "<br>Greenlets (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj).encode("utf8")))

        from Worker import Worker
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Worker)]
        yield "<br>Workers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        from Connection import Connection
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Connection)]
        yield "<br>Connections (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        from socket import socket
        objs = [obj for obj in gc.get_objects() if isinstance(obj, socket)]
        yield "<br>Sockets (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        from msgpack import Unpacker
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Unpacker)]
        yield "<br>Msgpack unpacker (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        from Site import Site
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Site)]
        yield "<br>Sites (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        objs = [
            obj for obj in gc.get_objects()
            if isinstance(obj, self.server.log.__class__)
        ]
        yield "<br>Loggers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj.name)))

        objs = [obj for obj in gc.get_objects() if isinstance(obj, UiRequest)]
        yield "<br>UiRequests (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        from Peer import Peer
        objs = [obj for obj in gc.get_objects() if isinstance(obj, Peer)]
        yield "<br>Peers (%s):<br>" % len(objs)
        for obj in objs:
            yield " - %.1fkb: %s<br>" % (self.getObjSize(
                obj, hpy), cgi.escape(repr(obj)))

        objs = [(key, val) for key, val in sys.modules.iteritems()
                if val is not None]
        objs.sort()
        yield "<br>Modules (%s):<br>" % len(objs)
        for module_name, module in objs:
            yield " - %.3fkb: %s %s<br>" % (self.getObjSize(
                module, hpy), module_name, cgi.escape(repr(module)))

        gc.collect()  # Explicit garbage collection
        yield "Done in %.1fs" % (time.time() - s)
Example #57
def ccache():
    form = FORM(
        P(
            TAG.BUTTON(T("Clear CACHE?"),
                       _type="submit",
                       _name="yes",
                       _value="yes")),
        P(TAG.BUTTON(T("Clear RAM"), _type="submit", _name="ram",
                     _value="ram")),
        P(
            TAG.BUTTON(T("Clear DISK"),
                       _type="submit",
                       _name="disk",
                       _value="disk")),
    )

    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True

        if clear_ram:
            cache.ram.clear()
            session.flash += T("Ram Cleared")
        if clear_disk:
            cache.disk.clear()
            session.flash += T("Disk Cleared")

        redirect(URL(r=request))

    try:
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False

    import shelve
    import os
    import copy
    import time
    import math
    from gluon import portalocker

    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    disk['keys'] = []
    total['keys'] = []

    def GetInHMS(seconds):
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)

        return (hours, minutes, seconds)

    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            ram['entries'] += 1
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
            ram['keys'].append((key, GetInHMS(time.time() - value[0])))
    folder = os.path.join(request.folder, 'cache')
    if not os.path.exists(folder):
        os.mkdir(folder)
    locker = open(os.path.join(folder, 'cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(os.path.join(folder, 'cache.shelve'))
    try:
        for key, value in disk_storage.items():
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                disk['entries'] += 1
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
                disk['keys'].append((key, GetInHMS(time.time() - value[0])))

    finally:
        portalocker.unlock(locker)
        locker.close()
        disk_storage.close()

    total['entries'] = ram['entries'] + disk['entries']
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    total['keys'] = ram['keys'] + disk['keys']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0

    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']

    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])

    def key_table(keys):
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))

    ram['keys'] = key_table(ram['keys'])
    disk['keys'] = key_table(disk['keys'])
    total['keys'] = key_table(total['keys'])

    return dict(form=form,
                total=total,
                ram=ram,
                disk=disk,
                object_stats=hp is not False)
Example #58
    return _outer


def decode_profile(profile):
    stats = hotshot.stats.load(profile)
    #stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)


###############################
# memory profiling            #
###############################

from guppy import hpy
hpy = hpy()  # note: rebinds 'hpy' to the heapy session context, shadowing the factory function
#SETUP_HPY = False  # defined in settings.py


def setup_hpy(reset=False):
    print "setting up heapy..."
    if (not settings.SETUP_HPY) or (reset):
        print "resetting it"
        hpy.setrelheap()
        settings.SETUP_HPY = True
    else:
        print "not resetting it"


def profile_memory(log_file):
Example #59
from twisted.enterprise import adbapi
from twisted.internet import reactor, defer
from twisted.internet.error import CannotListenError
from twisted.internet.protocol import ServerFactory
from twisted.internet.threads import deferToThread

crochet.no_setup()
main_context = context.Context()
all_contexts = [main_context]

session = None
quit_confirm_time = None

try:
    from guppy import hpy
    heapstats = hpy()
    heapstats.setref()
except ImportError:
    heapstats = None


class PappySession(object):
    """
    An object representing a pappy session. Mainly you'll only use this to get to
    the session config.

    :ivar config: The configuration settings for the session
    :vartype config: :class:`pappyproxy.config.PappyConfig`
    """
    def __init__(self, sessconfig):
        self.config = sessconfig
Example #60
except ImportError:
    from askbot.models.message import Message as DjangoMessage

from django.utils.translation import ugettext_lazy as _
from askbot.utils.console import ProgressBar
from askbot.utils.slug import slugify
from askbot.models.badges import award_badges_signal, award_badges
from askbot.importers.stackexchange.management import is_ready as importer_is_ready
from optparse import make_option
#from markdown2 import Markdown
#markdowner = Markdown(html4tags=True)

if DEBUGME:
    from guppy import hpy
    from askbot.utils import dummy_transaction as transaction
    HEAP = hpy()
else:
    #from django.db import transaction
    from askbot.utils import dummy_transaction as transaction

xml_read_order = (
    'VoteTypes',
    'UserTypes',
    'Users',
    'Users2Votes',
    'Badges',
    'Users2Badges',
    'CloseReasons',  #'FlatPages',
    'MessageTypes',
    'PostHistoryTypes',
    'PostTypes',