Ejemplo n.º 1
0
    def run(self, result=None):
        """Run the test case repeatedly and flag suspected memory leaks.

        The test runs once normally; if ``self.iterations`` is non-zero it
        is then re-run that many times while sampling peak RSS and the
        gen-0 GC object count after each run.  If either metric grew on
        more than 80% of the iterations, an AssertionError is recorded on
        ``result`` via ``addError``.
        """
        # run first time
        super(TestMemoryLeaks, self).run(result=result)
        if self.iterations == 0:
            return

        # Baselines: peak resident set size and gen-0 tracked-object count.
        m_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        o_count = gc.get_count()[0]
        m_hits = 0
        o_hits = 0
        for i in range(self.iterations):
            super(TestMemoryLeaks, self).run(result=result)
            m_usage_n = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            if m_usage_n > m_usage:
                m_usage = m_usage_n
                m_hits += 1
            o_count_n = gc.get_count()[0]
            if o_count_n > o_count:
                o_count = o_count_n
                o_hits += 1
            # Drop the temporaries so they cannot skew the next sample.
            del m_usage_n
            del o_count_n

        # Growth on more than 80% of iterations -> report as an error.
        if m_hits > int(self.iterations * 0.8):
            result.buffer = False
            # raise/except so addError receives a real traceback in exc_info.
            try:
                raise AssertionError("memory leak detected")
            except AssertionError:
                result.addError(self, sys.exc_info())
        if o_hits > int(self.iterations * 0.8):
            result.buffer = False
            try:
                raise AssertionError("unreferenced objects detected")
            except AssertionError:
                result.addError(self, sys.exc_info())
 def _train_func(engine, batch):
     """Train-step stub that snapshots gc counts per epoch or iteration.

     Closure variables (defined in the enclosing scope): ``trigger_event``,
     ``Events`` and ``gb_count_dict``.  Depending on ``trigger_event``, the
     tuple from ``gc.get_count()`` is stored keyed by epoch number or by
     iteration number.  ``batch`` is unused.
     """
     # store garbage collection counts
     if trigger_event == Events.EPOCH_COMPLETED or trigger_event.lower() == "epoch":
         # First iteration of a new epoch -> record under the epoch key.
         if engine.state.iteration % engine.state.epoch_length == 1:
             gb_count_dict[engine.state.epoch] = gc.get_count()
     elif trigger_event.lower() == "iteration":
         gb_count_dict[engine.state.iteration] = gc.get_count()
Ejemplo n.º 3
0
        def profiler(*args, **kwargs):
            """Wrapper around ``fn`` that logs duration and/or memory usage.

            Closure variables from the enclosing decorator: ``fn``,
            ``memory``, ``duration``, ``log_level`` and ``logger``.  When
            the logger is not enabled for the chosen level the wrapped
            function is called directly with no profiling overhead.
            """
            nonlocal log_level
            # Default to TRACE when the logging module provides it, else DEBUG.
            log_level = log_level or (logging.TRACE if hasattr(logging, 'TRACE') else logging.DEBUG)
            if not logger.isEnabledFor(log_level):
                return fn(*args, **kwargs)

            name = fn_name(fn)
            with MemoryProfiler(enabled=memory) as m:
                if memory:
                    mem = m.usage(before=True)
                    logger.log(log_level,
                               "[PROFILING] `%s`\n"
                               "memory before; resident: %.2f MB, virtual: %.2f MB, unique: %.2f MB.\n"
                               "gc before; threshold: %s, gen_count: %s, perm_count: %s",
                               name,
                               mem.resident, mem.virtual, mem.unique,
                               gc.get_threshold(), gc.get_count(), gc.get_freeze_count())
                # Time only the wrapped call itself, inside the memory scope.
                with Timer(enabled=duration) as t:
                    ret = fn(*args, **kwargs)
            if duration:
                logger.log(log_level, "[PROFILING] `%s` executed in %.3fs.", name, t.duration)
            if memory:
                ret_size = obj_size(ret)
                if ret_size > 0:
                    logger.log(log_level, "[PROFILING] `%s` returned object size: %.3f MB.", name, to_mb(ret_size))
                mem = m.usage()
                logger.log(log_level,
                           "[PROFILING] `%s`\n"
                           "memory after; resident: %+.2f MB/%.2f MB, virtual: %+.2f MB/%.2f MB, unique: %+.2f MB/%.2f MB.\n"
                           "gc after; threshold: %s, gen_count: %s, perm_count: %s",
                           name,
                           mem.resident_diff, mem.resident, mem.virtual_diff, mem.virtual, mem.unique_diff, mem.unique,
                           gc.get_threshold(), gc.get_count(), gc.get_freeze_count())
            return ret
Ejemplo n.º 4
0
def call_gc():
    """Log GC state, force a full collection, then print the new state."""
    before = gc.get_count()
    logger.info("Calling GC {0} garbage, {1}, {2}, {3}".format(
        len(gc.garbage), before[0], before[1], before[2]))
    unreachable = gc.collect()
    after = gc.get_count()
    print("Called GC {0} unreachable, {1} garbage, {2}, {3}, {4}".format(
        unreachable, len(gc.garbage), after[0], after[1], after[2]))
Ejemplo n.º 5
0
def call_gc():
    """Report collector counts before and after forcing a collection."""
    gen0, gen1, gen2 = gc.get_count()
    message = "Calling GC {0} garbage, {1}, {2}, {3}".format(
        len(gc.garbage), gen0, gen1, gen2)
    logger.info(message)
    unreach = gc.collect()
    gen0, gen1, gen2 = gc.get_count()
    print("Called GC {0} unreachable, {1} garbage, {2}, {3}, {4}".format(
        unreach, len(gc.garbage), gen0, gen1, gen2))
Ejemplo n.º 6
0
def test_collect_generations():
    """Check that a surviving object climbs one GC generation per collect.

    A fresh dict starts in generation 0; each ``gc.collect(n)`` should
    promote survivors into generation n + 1, and a full ``collect(2)``
    leaves every per-generation counter at zero.
    """
    gc.collect()
    a = dict()
    gc.collect(0)
    expect(gc.get_count(), (0, 1, 0), "collect(0)")
    gc.collect(1)
    expect(gc.get_count(), (0, 0, 1), "collect(1)")
    gc.collect(2)
    # Bug fix: the failure label here previously said "collect(1)".
    expect(gc.get_count(), (0, 0, 0), "collect(2)")
Ejemplo n.º 7
0
 def test_get_count(self):
     """After a full collect, gens 1/2 are empty and gen 0 grows once a
     new tracked container is allocated."""
     gc.collect()
     a, b, c = gc.get_count()
     x = []
     d, e, f = gc.get_count()
     self.assertEqual((b, c), (0, 0))
     self.assertEqual((e, f), (0, 0))
     # Loose bound: the unpacking itself may have allocated a few objects.
     self.assertLess(a, 5)
     # At least the list created above must show up in gen 0.
     self.assertGreater(d, a)
Ejemplo n.º 8
0
def test_collect_generations():
    """A surviving dict should be promoted one generation per collect(n).

    After ``gc.collect(n)`` the dict (and get_count's tuple) land in
    generation n + 1; a full ``collect(2)`` empties all generations.
    """
    gc.collect()
    a = dict()
    gc.collect(0)
    expect(gc.get_count(), (0, 1, 0), "collect(0)")
    gc.collect(1)
    expect(gc.get_count(), (0, 0, 1), "collect(1)")
    gc.collect(2)
    # Fixed copy-paste bug: this label used to read "collect(1)".
    expect(gc.get_count(), (0, 0, 0), "collect(2)")
Ejemplo n.º 9
0
 def test_get_count(self):
     """Exact generation counts around a collection (CPython-specific:
     assumes (0, 0, 0) after collect, then exactly two new objects)."""
     # Avoid future allocation of method object
     assertEqual = self.assertEqual
     gc.collect()
     assertEqual(gc.get_count(), (0, 0, 0))
     a = dict()
     # since gc.collect(), we created two objects:
     # the dict, and the tuple returned by get_count()
     assertEqual(gc.get_count(), (2, 0, 0))
Ejemplo n.º 10
0
 def test_get_count(self):
     """gc.get_count() must be exactly (0, 0, 0) right after a full
     collect, and (2, 0, 0) after allocating one dict (the second object
     being get_count's own result tuple).  CPython-implementation test."""
     # Avoid future allocation of method object
     assertEqual = self.assertEqual
     gc.collect()
     assertEqual(gc.get_count(), (0, 0, 0))
     a = dict()
     # since gc.collect(), we created two objects:
     # the dict, and the tuple returned by get_count()
     assertEqual(gc.get_count(), (2, 0, 0))
Ejemplo n.º 11
0
 def do_gc(self, mode, indent):
     """Force a collection, logging per-generation deltas when enabled.

     ``mode`` indexes GCPreempt.WHEN_NAMES for the log message; ``indent``
     is added to ResourceLogger.print_res_spaces (NOTE(review): it is
     never subtracted here -- presumably a matching call elsewhere undoes
     it; confirm).  When the resource logger is disabled this is just a
     plain gc.collect().
     """
     if ResourceLogger.is_enabled():
         c1 = gc.get_count()
         gc.collect()
         c2 = gc.get_count()
         ResourceLogger.print_res_spaces +=  indent
         GCPreempt.logger.info('%scollected (%d, %d, %d) objects %s %s' % (" "*ResourceLogger.print_res_spaces, c1[0]-c2[0], c1[1]-c2[1], c1[2]-c2[2], GCPreempt.WHEN_NAMES[mode], self.ctx_name))
     else:
         gc.collect()
Ejemplo n.º 12
0
 def IsLeaking(self, sslsocket_impl):
   """Heuristic leak probe for an sslsocket wrapper (Python 2: xrange).

   Wraps one plain socket 1000 times with ``sslsocket_impl`` while the
   cyclic GC is disabled, and returns True if the gen-0 object count
   grew by at least 1000 (i.e. at least one object retained per wrap).
   """
   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
   # Disable GC so counts only ever grow during the measurement.
   gc.disable()
   gc.collect()
   count0 = gc.get_count()[0]
   for i in xrange(1000):
     sslsocket_impl(sock)
   count1 = gc.get_count()[0]
   self.assertTrue(count1 >= count0)
   return count1 - count0 >= 1000
Ejemplo n.º 13
0
 def dismiss_callback(self, i):
     """Popup-dismiss handler: adopt the selected photo (if any), drop the
     browser widget, and print GC state around a forced collection."""
     sel_photo_list = self.imageBrowse.fileChooser.selection
     if sel_photo_list:
         print("Selection: {}".format(sel_photo_list[0]))
         self.source = sel_photo_list[0]
     # Release the browser so its widget tree becomes collectable.
     self.imageBrowse = None
     print(gc.get_count())
     gc.collect()
     print(gc.get_count())
     print(gc.garbage)
     self.popup_open = False
Ejemplo n.º 14
0
 def test_collect_generations(self):
     """A surviving dict should climb one GC generation per collect(n)."""
     # Avoid future allocation of method object
     assertEqual = self.assertEqual
     gc.collect()
     a = dict()
     gc.collect(0)
     assertEqual(gc.get_count(), (0, 1, 0))
     gc.collect(1)
     assertEqual(gc.get_count(), (0, 0, 1))
     gc.collect(2)
     assertEqual(gc.get_count(), (0, 0, 0))
Ejemplo n.º 15
0
 def test_get_count(self):
     """Exact CPython generation counts after collect; skipped on
     IronPython, where gc.get_count() semantics differ."""
     if test_support.due_to_ironpython_incompatibility("http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"):
         return
     # Avoid future allocation of method object
     assertEqual = self._baseAssertEqual
     gc.collect()
     assertEqual(gc.get_count(), (0, 0, 0))
     a = dict()
     # since gc.collect(), we created two objects:
     # the dict, and the tuple returned by get_count()
     assertEqual(gc.get_count(), (2, 0, 0))
Ejemplo n.º 16
0
 def dismiss_callback(self, i):
     """Handle popup dismissal: store the chosen photo path (if one was
     selected), release the browser widget and dump GC diagnostics."""
     sel_photo_list = self.imageBrowse.fileChooser.selection
     if sel_photo_list:
         print("Selection: {}".format(sel_photo_list[0]))
         self.source = sel_photo_list[0]
     # Drop the reference so the file-chooser tree can be collected below.
     self.imageBrowse = None
     print(gc.get_count())
     gc.collect()
     print(gc.get_count())
     print(gc.garbage)
     self.popup_open = False
Ejemplo n.º 17
0
 def test_collect_generations(self):
     """Each gc.collect(n) promotes the surviving dict to generation
     n + 1; a full collect(2) leaves all generations empty."""
     # Avoid future allocation of method object
     assertEqual = self.assertEqual
     gc.collect()
     a = dict()
     gc.collect(0)
     assertEqual(gc.get_count(), (0, 1, 0))
     gc.collect(1)
     assertEqual(gc.get_count(), (0, 0, 1))
     gc.collect(2)
     assertEqual(gc.get_count(), (0, 0, 0))
Ejemplo n.º 18
0
 def test_get_count(self):
     """Gens 1/2 stay empty after a full collect; gen 0 grows once a new
     tracked object (the list) is allocated."""
     gc.collect()
     a, b, c = gc.get_count()
     x = []
     d, e, f = gc.get_count()
     self.assertEqual((b, c), (0, 0))
     self.assertEqual((e, f), (0, 0))
     # This is less fragile than asserting that a equals 0.
     self.assertLess(a, 5)
     # Between the two calls to get_count(), at least one object was
     # created (the list).
     self.assertGreater(d, a)
Ejemplo n.º 19
0
 def test_collect_generations(self):
     """Loose variant of the generation-promotion test: only the gen-1/2
     slots are checked exactly, so gen-0 churn cannot break it."""
     gc.collect()
     x = []
     gc.collect(0)
     a, b, c = gc.get_count()
     gc.collect(1)
     d, e, f = gc.get_count()
     gc.collect(2)
     g, h, i = gc.get_count()
     self.assertEqual((b, c), (1, 0))
     self.assertEqual((e, f), (0, 1))
     self.assertEqual((h, i), (0, 0))
Ejemplo n.º 20
0
 def do_gc(self, mode, indent):
     """Run a collection; when the resource logger is enabled, also log
     how many objects each generation lost.

     ``mode`` selects the GCPreempt.WHEN_NAMES label; ``indent`` widens
     ResourceLogger.print_res_spaces for nested log formatting.
     """
     if ResourceLogger.is_enabled():
         c1 = gc.get_count()
         gc.collect()
         c2 = gc.get_count()
         ResourceLogger.print_res_spaces += indent
         GCPreempt.logger.info('%scollected (%d, %d, %d) objects %s %s' %
                               (" " * ResourceLogger.print_res_spaces,
                                c1[0] - c2[0], c1[1] - c2[1], c1[2] - c2[2],
                                GCPreempt.WHEN_NAMES[mode], self.ctx_name))
     else:
         gc.collect()
 def test_get_count(self):
     """After a full collect, generations 1/2 must be empty and gen 0
     must grow once a new tracked object is created."""
     gc.collect()
     a, b, c = gc.get_count()
     x = []
     d, e, f = gc.get_count()
     self.assertEqual((b, c), (0, 0))
     self.assertEqual((e, f), (0, 0))
     # This is less fragile than asserting that a equals 0.
     self.assertLess(a, 5)
     # Between the two calls to get_count(), at least one object was
     # created (the list).
     self.assertGreater(d, a)
Ejemplo n.º 22
0
 def test_get_count(self):
     """Exact CPython generation counts around a collection; skipped on
     IronPython where get_count() is not comparable."""
     if test_support.due_to_ironpython_incompatibility(
         "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
     ):
         return
     # Avoid future allocation of method object
     assertEqual = self._baseAssertEqual
     gc.collect()
     assertEqual(gc.get_count(), (0, 0, 0))
     a = dict()
     # since gc.collect(), we created two objects:
     # the dict, and the tuple returned by get_count()
     assertEqual(gc.get_count(), (2, 0, 0))
Ejemplo n.º 23
0
 def test_collect_generations(self):
     """Generation-promotion test (skipped on IronPython): each
     gc.collect(n) moves the surviving dict into generation n + 1."""
     if test_support.due_to_ironpython_bug("http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314459"):
         return
     # Avoid future allocation of method object
     assertEqual = self.assertEqual
     gc.collect()
     a = dict()
     gc.collect(0)
     assertEqual(gc.get_count(), (0, 1, 0))
     gc.collect(1)
     assertEqual(gc.get_count(), (0, 0, 1))
     gc.collect(2)
     assertEqual(gc.get_count(), (0, 0, 0))
Ejemplo n.º 24
0
def f3():
    """Demonstrate collection of a two-object reference cycle.

    c1 and c2 point at each other via .t, so after both local names are
    dropped the pair is unreachable and only the cyclic GC can reclaim it.
    """
    c1 = ClassA()
    c2 = ClassA()
    c1.t = c2
    c2.t = c1
    print(gc.get_count())
    del c1
    print(gc.get_count())
    del c2
    print(gc.garbage)
    # collect() returns the number of unreachable objects it found.
    print(gc.collect())
    print(gc.garbage)
    time.sleep(10)
Ejemplo n.º 25
0
 def test_gc_create0(self):
     """GC-enabled array classes are tracked and reclaimed cleanly.

     Snapshots gc.get_count() before creating the instances and again
     after deleting the outer instance plus a full collect; the counts
     must return to the baseline.
     """
     gc.collect()
     cnt1 = gc.get_count()
     A = make_arrayclass("A", 2, gc=True)
     B = make_arrayclass("B", varsize=True, gc=True)
     b = B([], ())
     a = A({}, b)
     cnt2 = gc.get_count()
     self.assertEqual(gc.is_tracked(a), True)
     self.assertEqual(gc.is_tracked(b), True)
     del a
     gc.collect()
     cnt3 = gc.get_count()
     self.assertEqual(cnt1, cnt3)
Ejemplo n.º 26
0
 def test_gc_create0(self):
     """Instances of gc=True record classes must be GC-tracked, and the
     generation counts must return to baseline after delete + collect."""
     gc.collect()
     cnt1 = gc.get_count()
     A = make_class("A", fields=2, gc=True)
     B = make_class("B", varsize=True, gc=True)
     b = B([], ())
     a = A({}, b)
     cnt2 = gc.get_count()
     self.assertEqual(gc.is_tracked(a), True)
     self.assertEqual(gc.is_tracked(b), True)
     del a
     gc.collect()
     cnt3 = gc.get_count()
     self.assertEqual(cnt1, cnt3)
Ejemplo n.º 27
0
 def test_create0(self):
     """Default (non-gc) record classes must NOT be GC-tracked, yet the
     generation counts still return to baseline after delete + collect."""
     gc.collect()
     cnt1 = gc.get_count()
     A = make_class("A", fields=2)
     B = make_class("B", varsize=True)
     b = B([], ())
     a = A({}, b)
     cnt2 = gc.get_count()
     self.assertEqual(gc.is_tracked(a), False)
     self.assertEqual(gc.is_tracked(b), False)
     del a
     gc.collect()
     cnt3 = gc.get_count()
     self.assertEqual(cnt1, cnt3)
Ejemplo n.º 28
0
 def test_collect_generations(self):
     """CPython generation-promotion semantics; skipped on IronPython."""
     if test_support.due_to_ironpython_bug(
         "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314459"
     ):
         return
     # Avoid future allocation of method object
     assertEqual = self.assertEqual
     gc.collect()
     a = dict()
     gc.collect(0)
     assertEqual(gc.get_count(), (0, 1, 0))
     gc.collect(1)
     assertEqual(gc.get_count(), (0, 0, 1))
     gc.collect(2)
     assertEqual(gc.get_count(), (0, 0, 0))
Ejemplo n.º 29
0
def run():
    """Monitor loop: periodically log memory usage and GC statistics.

    Runs forever, sampling every 6 seconds.  Honours the debug.* feature
    flags for leak debugging, pause handling and optional forced
    collections.
    """
    if features.get('debug.garbage'):
        gc.set_debug(gc.DEBUG_LEAK)

    while True:
        if osutil.is_paused():
            if features.get('debug.monitor.stop_on_pause'):
                log('log', 'monitor stopped because of pause')
                osutil.wait_for_resume()

        time.sleep(6)
        log('mem_self', get_mem_usage('self'))
        try:
            servpid = get_server_pid()
            server_use = get_mem_usage(servpid)
        except IOError:
            # Server pid/usage not readable -- report it as not running.
            server_use = 'not running'

        log('mem_server', server_use)
        log('gc_stat',
            'counts=%r garbage=%d' % (gc.get_count(), len(gc.garbage)))

        if features.get('debug.gc_count'):
            # Expensive: forces a collection and walks every tracked object.
            gc.collect()
            log('gc_count', len(gc.get_objects()))
Ejemplo n.º 30
0
def handler(signum, frame):
    """SIGUSR1 handler: dump radioPlayer state and GC diagnostics.

    Parameters follow the signal-handler convention (signal number and
    current stack frame); both are unused here.
    """
    logger.debug('SIGUSR1 debug info radioPlayer')

    logger.debug('collected trace msgs: ' + str(len(mpv_msgs)))

    logger.debug(
        'volume pause mute core_idle idle_active demuxer_cache_duration demuxer_cache_time')
    logger.debug(str(radioPlayer.volume))
    logger.debug(str(radioPlayer.pause))
    logger.debug(str(radioPlayer.mute))
    logger.debug(str(radioPlayer.core_idle))
    logger.debug(str(radioPlayer.idle_active))
    logger.debug(str(radioPlayer.demuxer_cache_duration))
    logger.debug(str(radioPlayer.demuxer_cache_time))

    gc.collect()
    logger.debug('gc.garbage length: ' + str(len(gc.garbage)))
    logger.debug('gc enabled: ' + str(gc.isenabled()))
    logger.debug('gc counts: ' + str(gc.get_count()))

    logger.debug('UNCOLLECTABLE objects start')

    for obj in gc.garbage:
        try:
            logger.debug(str(obj))
        except Exception as e:
            # str(obj) can itself raise for half-dead objects; log and
            # continue.  (A dead `pass` after this call was removed.)
            logger.debug('Error in gc.garbage loop: {}'.format(e))

    logger.debug('UNCOLLECTABLE objects end')
Ejemplo n.º 31
0
def test_models():
    """Smoke-test encoder/decoder/autoencoder output shapes at two input
    sizes, print the model summary, then release memory."""
    latent_channel_dim = 4

    test_encoder_output_channels(input_data_shape=(1, 3, 28, 28),
                                 latent_channel_dim=latent_channel_dim)
    test_encoder_output_channels(input_data_shape=(8, 3, 200, 200),
                                 latent_channel_dim=latent_channel_dim)

    test_decoder_output_channels(input_data_shape=(1, 3, 28, 28),
                                 latent_channel_dim=latent_channel_dim)
    test_decoder_output_channels(input_data_shape=(8, 3, 200, 200),
                                 latent_channel_dim=latent_channel_dim)

    test_autoencoder(input_data_shape=(1, 3, 28, 28),
                     latent_channel_dim=latent_channel_dim)
    test_autoencoder(input_data_shape=(8, 3, 200, 200),
                     latent_channel_dim=latent_channel_dim)

    print_model_details(AutoEncoder(latent_channel_dim))

    # Reclaim Python objects first, then release cached CUDA allocations.
    gc.collect()
    torch.cuda.empty_cache()

    print(gc.get_count())
    print(gc.get_stats())
Ejemplo n.º 32
0
    def status(self, mess, args):
        """ If I am alive I should be able to respond to this one

        Returns a dict with each plugin's status code (BL/BU/L/C/U),
        the system load averages (or None where unavailable) and the
        current gc.get_count() tuple.
        """
        all_blacklisted = self.get_blacklisted_plugin()
        all_loaded = get_all_active_plugin_names()
        all_attempted = sorted([p.name for p in self.all_candidates])
        plugins_statuses = []
        for name in all_attempted:
            if name in all_blacklisted:
                if name in all_loaded:
                    plugins_statuses.append(('BL', name))
                else:
                    plugins_statuses.append(('BU', name))
            elif name in all_loaded:
                plugins_statuses.append(('L', name))
            else:
                # Hoisted: the original called get_plugin_obj_by_name(name)
                # twice for the same lookup.
                plugin_obj = get_plugin_obj_by_name(name)
                if (plugin_obj is not None
                        and plugin_obj.get_configuration_template() is not None
                        and self.get_plugin_configuration(name) is None):
                    plugins_statuses.append(('C', name))
                else:
                    plugins_statuses.append(('U', name))

        #noinspection PyBroadException
        try:
            from posix import getloadavg

            loads = getloadavg()
        except Exception as _:
            # Non-POSIX platform: no load-average information.
            loads = None
        return {'plugins_statuses': plugins_statuses, 'loads': loads, 'gc': gc.get_count()}
Ejemplo n.º 33
0
def main():
    """GCBench-style driver: stretch the heap, keep long-lived data
    alive, then time binary-tree construction at increasing depths."""
    print("Garbage Collector Test")
    print(" Live storage will peak at {} bytes."
          .format(2 * sys.getsizeof(Node()) * treeSize(kLongLivedTreeDepth) + sys.getsizeof(0.0) * kArraySize))
    print(" Stretching memory with a binary tree of depth {}".format(kStretchTreeDepth))

    tStart = time.monotonic()

    # Stretch the memory space quickly
    tempTree = makeTree(kStretchTreeDepth)
    tempTree = None

    # Create a long lived object
    print(" Creating a long-lived binary tree of depth {}".format(kLongLivedTreeDepth))

    longLivedTree = Node()
    populate(kLongLivedTreeDepth, longLivedTree)

    # Create long-lived array, filling half of it
    print(" Creating a long-lived array of {} doubles".format(kArraySize))

    longLivedArray = array('d', [0.0] + [1.0 / x for x in range(1, kArraySize)])

    for d in range(kMinTreeDepth, kMaxTreeDepth+1, 2):
        timeConstruction(d)

    # Sanity check that the long-lived data survived all the churn.
    if longLivedTree is None or longLivedArray[1000] != 1.0 / 1000:
        print("Failed")

    tFinish = time.monotonic()

    print("Completed in {} ms".format(timeElapsed(tStart, tFinish)))
    print("Completed {} collections".format(sum(gc.get_count())))
Ejemplo n.º 34
0
    def status(self, mess, args):
        """ If I am alive I should be able to respond to this one

        Returns a dict with per-plugin status codes (B/L/C/E), system
        load averages (None where unavailable) and gc.get_count().
        """
        all_blacklisted = self.get_blacklisted_plugin()
        all_loaded = get_all_active_plugin_names()
        all_attempted = sorted([p.name for p in self.all_candidates])
        plugins_statuses = []
        for name in all_attempted:
            if name in all_blacklisted:
                plugins_statuses.append(("B", name))
            elif name in all_loaded:
                plugins_statuses.append(("L", name))
            else:
                # Hoisted: get_plugin_obj_by_name() was called twice per name.
                plugin_obj = get_plugin_obj_by_name(name)
                if (
                    plugin_obj is not None
                    and plugin_obj.get_configuration_template() is not None
                    and self.get_plugin_configuration(name) is None
                ):
                    plugins_statuses.append(("C", name))
                else:
                    plugins_statuses.append(("E", name))

        try:
            from posix import getloadavg

            loads = getloadavg()
        except Exception:
            # Non-POSIX platform: no load-average information available.
            loads = None
        return {"plugins_statuses": plugins_statuses, "loads": loads, "gc": gc.get_count()}
Ejemplo n.º 35
0
    def _collect_gc_metrics(self, plugin_data, with_snapshot):
        """Record per-generation GC collection counts and thresholds as
        delta metrics on ``plugin_data``.

        Any failure is logged at debug level and swallowed: metrics
        collection must never break the caller.
        """
        try:
            gc_count = gc.get_count()
            gc_threshold = gc.get_threshold()

            # Both dicts were repeated verbatim six times in the original;
            # hoist them once.
            previous_gc = self.previous['data']['metrics']['gc']
            current_gc = plugin_data['data']['metrics']['gc']

            # Same call order as the original: all collectN, then thresholdN.
            for gen in range(3):
                self.apply_delta(gc_count[gen], previous_gc, current_gc,
                                 "collect%d" % gen, with_snapshot)
            for gen in range(3):
                self.apply_delta(gc_threshold[gen], previous_gc, current_gc,
                                 "threshold%d" % gen, with_snapshot)
        except Exception:
            logger.debug("_collect_gc_metrics", exc_info=True)
Ejemplo n.º 36
0
def gc_dump():
    """Dump GC usage to a timestamped file in the log folder.

    Writes gc.get_count() and the repr of every item in gc.garbage to
    ``gcdump-<UTC timestamp>.txt`` and returns a human-readable summary
    string.  On any failure an error string is returned instead of
    raising.
    """
    try:
        dump_dir = settings.LOG_FOLDER
        tstamp = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
        fname = os.path.join(dump_dir, 'gcdump-%s.txt' % (tstamp,))
        count = gc.get_count()
        c = 0
        # `with` guarantees the handle is closed even if a write raises
        # (the original leaked the file object on mid-write exceptions).
        with open(fname, "w") as fh:
            fh.write("gc.get_count():\n%s\n" % (count,))
            fh.write("gc.garbage:\n")
            for x in gc.garbage:
                c += 1
                try:
                    line = repr(x)
                except Exception as e:
                    # Half-dead objects can fail to repr(); note it and go on.
                    line = "Error str'ing an object: " + str(e)
                fh.write(line + "\n")
        m = 'GC count is %s and %d garbage items written to: %s' % (
            count, c, fname)
        return m
    except Exception as e:
        return "Error while trying to dump GC: %s" % (e,)
Ejemplo n.º 37
0
 def stats(self):
     """Snapshot of service health as a nested dict: gevent threadpool
     and loop state, GC figures, RPC stats, per-connection stats and the
     stats of every installed interface."""
     hub = gevent.get_hub()
     threadpool, loop = hub.threadpool, hub.loop
     s = {
         'endpoint': self.endpoint,
         'identity': self.identity,
         'greenlets': len(self.pool),
         'service': self.service_name,
         'gevent': {
             'threadpool': {
                 'size': threadpool.size,
                 'maxsize': threadpool.maxsize,
             },
             'active': loop.activecnt,
             'pending': loop.pendingcnt,
             'iteration': loop.iteration,
             'depth': loop.depth,
         },
         'gc': {
             'garbage': len(gc.garbage),
             'collections': gc.get_count(),
         },
         'rpc': self.rpc_stats(),
         'connections': [c.stats() for c in self.connections.values()],
     }
     # Installed interfaces contribute their own stats under their name.
     for name, interface in six.iteritems(self.installed_interfaces):
         s[name] = interface.stats()
     return s
Ejemplo n.º 38
0
def create_system_counters():
    """Register gauges for per-generation GC counts, total tracked objects
    and process stats."""
    import gc
    for i in [0, 1, 2]:
        # Bind i as a default argument: the original lambda closed over the
        # loop variable, so at call time every gauge read generation 2
        # (late-binding closure bug).
        Gauge('gc_count_{}'.format(i), lambda i=i: gc.get_count()[i])
    Gauge('gc_count_objects', lambda: len(gc.get_objects()))
    Gauge('self', procstat.get_self)
    Gauge('cpu', procstat.get_cpu)
Ejemplo n.º 39
0
def main():
    """Build a self-referencing list and print GC introspection data."""
    a = 4
    b = 5

    # A list that contains itself forms a reference cycle.
    c_list = [123, 456]
    c_list.append(c_list)
    c_list[2].append(789)

    print(c_list)

    print("Stats: {}".format(gc.get_stats()))
    print("Count: {}".format(gc.get_count()))
    print("GC enabled: {}".format(gc.isenabled()))
    print("Threshold: {}".format(gc.get_threshold()))
    print("c_list is tracked: {}".format(gc.is_tracked(c_list)))
    # The count returned is generally one higher than you might expect,
    # because it includes the (temporary) reference as an argument to
    # getrefcount().
    print("Reference count for c_list: {}".format(sys.getrefcount(c_list)))
    del c_list[2]
    print("Reference count for c_list: {}".format(sys.getrefcount(c_list)))

    print("Collecting: {}".format(gc.collect()))

    print("Done.")
Ejemplo n.º 40
0
    def status(self, mess, args):
        """ If I am alive I should be able to respond to this one

        Builds a per-plugin status list (BL/BU = blacklisted loaded/unloaded,
        L = loaded, C = constructed but inactive, U = unknown) and returns
        it with the load averages and gc.get_count().
        """
        all_blacklisted = holder.bot.get_blacklisted_plugin()
        all_loaded = get_all_active_plugin_names()
        all_attempted = sorted([p.name for p in holder.bot.all_candidates])
        plugins_statuses = []
        for name in all_attempted:
            if name in all_blacklisted:
                if name in all_loaded:
                    plugins_statuses.append(('BL', get_plugin_by_name(name).category, name))
                else:
                    plugins_statuses.append(('BU', name))
            elif name in all_loaded:
                plugins_statuses.append(('L', get_plugin_by_name(name).category, name))
            elif get_plugin_obj_by_name(name) is not None:
                plugins_statuses.append(('C', get_plugin_by_name(name).category, name))
            else:
                plugins_statuses.append(('U', name))

        #noinspection PyBroadException
        try:
            from posix import getloadavg

            loads = getloadavg()
        except Exception as _:
            # Non-POSIX platform: no load-average information.
            loads = None

        # plugins_statuses = sorted(plugins_statuses, key=lambda c: c[2])
        return {'plugins': plugins_statuses, 'loads': loads, 'gc': gc.get_count()}
Ejemplo n.º 41
0
    async def add_object(
            self, obj, site, loads, security=False, response=None):
        """Serialize ``obj`` into the pending ``loads`` batch; flush when full.

        With security=True only security info is serialized (ISecurityInfo),
        otherwise full catalog data.  Once the batch reaches self.bulk_size
        it is reindexed, cleared, and a GC pass is triggered.  Progress is
        streamed to ``response`` (bytes) when one is provided.
        """
        serialization = None
        if response is not None and hasattr(obj, 'id'):
            response.write(
                b'Object %s Security %r Buffer %d\n' %
                (obj.id.encode('utf-8'), security, len(loads)))
        try:
            if security:
                serialization = ISecurityInfo(obj)()
            else:
                serialization = ICatalogDataAdapter(obj)()
            loads[obj.uuid] = serialization
        except TypeError:
            # No usable adapter for this object -- skip it silently.
            pass

        if len(loads) >= self.bulk_size:
            if response is not None:
                response.write(b'Going to reindex\n')
            await self.reindex_bunk(
                site, loads, update=security, response=response)
            if response is not None:
                response.write(b'Indexed %d\n' % len(loads))
            loads.clear()
            # Gen-0 object count sampled before the forced collection below.
            num, _, _ = gc.get_count()
            gc.collect()
            total_memory = round(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024.0/1024.0,1)
            if response is not None:
                response.write(b'GC cleaned %d\n' % num)
                response.write(b'Memory usage         : % 2.2f MB\n' % total_memory)
Ejemplo n.º 42
0
    def get_info(self):
        """Return a status dict for this service: identity/version, load,
        uptime, memory (VmPeak/VmSize/VmRSS), GC counts and CPU percent."""
        peak, size, rss = self.get_vmsize()
        cputime = self.get_cputime()
        runtime = datetime.datetime.now() - self._start_time
        # Runtime expressed in 10ms units; divisor for the CPU percentage.
        r = ((runtime.days * 86400 + runtime.seconds) * 1000000 + runtime.microseconds) / 10000

        return { 
            'name' : self._name,
            'version' : self._version,
            'status' : self._status,
            'load_now' : self._load,
            'load_limit' : self._limit,
            'sequence' : self._seq,
            'start_time' : str(self._start_time),
            'current_time' : str(datetime.datetime.now()),
            'run_time' : str(datetime.datetime.now() - self._start_time),
            'dir' : self._dir,
            'auto_update' : self._auto_update,
            'next_update' : str(self._next_update),
            'heartbeat' : self._heartbeat,
            'md5_base' : self._md5_base,
            'md5_interface' : self._md5_interface,
            'vmpeak' : peak,
            'vmsize' : size,
            'vmrss' : rss,
            'gc' : str(gc.get_count()),
            'cpu' : round(float(cputime * 100) / r, 4),
        }
Ejemplo n.º 43
0
def main():
    """Exercise gc inspection APIs against a list that references itself."""
    a = 4
    b = 5

    cycle_list = []
    cycle_list.append(123)
    cycle_list.append(456)
    # Appending the list to itself creates a reference cycle.
    cycle_list.append(cycle_list)
    cycle_list[2].append(789)

    print(cycle_list)

    print("Stats: {}".format(gc.get_stats()))
    print("Count: {}".format(gc.get_count()))
    print("GC enabled: {}".format(gc.isenabled()))
    print("Threshold: {}".format(gc.get_threshold()))
    print("c_list is tracked: {}".format(gc.is_tracked(cycle_list)))

    # getrefcount() reports one more than expected because the argument
    # itself holds a temporary reference.
    print("Reference count for c_list: {}".format(sys.getrefcount(cycle_list)))
    del cycle_list[2]
    print("Reference count for c_list: {}".format(sys.getrefcount(cycle_list)))

    print("Collecting: {}".format(gc.collect()))

    print("Done.")
Ejemplo n.º 44
0
    def _maybe_gc() -> None:
        """Manually trigger per-generation GC when thresholds are exceeded.

        Intended for use with automatic GC disabled: compares counts to
        thresholds per generation, respects a minimum interval between
        collections (MIN_TIME_BETWEEN_GCS) and records timing plus
        unreachable-object metrics.
        """
        # Check if we need to do a manual GC (since its been disabled), and do
        # one if necessary. Note we go in reverse order as e.g. a gen 1 GC may
        # promote an object into gen 2, and we don't want to handle the same
        # object multiple times.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        end = time.time()
        for i in (2, 1, 0):
            # We check if we need to do one based on a straightforward
            # comparison between the threshold and count. We also do an extra
            # check to make sure that we don't a GC too often.
            if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[
                    i] < end - _last_gc[i]:
                # Gen-0 collections are frequent and cheap; log them quietly.
                if i == 0:
                    logger.debug("Collecting gc %d", i)
                else:
                    logger.info("Collecting gc %d", i)

                start = time.time()
                unreachable = gc.collect(i)
                end = time.time()

                _last_gc[i] = end

                gc_time.labels(i).observe(end - start)
                gc_unreachable.labels(i).set(unreachable)
0
def stats():
    """Log current RSS and GC generation counts for the caller's location.

    No-op unless _memstat_enabled; disables itself permanently when the
    platform support check (_can_enable) failed or sampling raises.
    """
    global _memstat_enabled, _can_enable
    if not _memstat_enabled:
        return
    if not _can_enable:
        logging.warning('could not enable memstat')
        _memstat_enabled = False
        return
    # Pre-bind so the finally-block cleanup cannot raise NameError when an
    # exception fires before the frames are captured (bug in the original).
    _frame = frame = None
    try:
        s0, s1, s2 = gc.get_count()
        usage = resource.getrusage(resource.RUSAGE_SELF)
        kb = usage.ru_maxrss
        if kb == 0:
            # Some platforms report 0 for ru_maxrss; fall back to /proc.
            kb = my_getrss()
        _frame = inspect.currentframe()
        frame = _frame.f_back
        fname = frame.f_code.co_filename
        fnum = frame.f_lineno
        logging.info('memstat:%s:%d: rss_kb: %d gb_stages: %d %d %d' % \
                     (fname, fnum, kb, s0, s1, s2))
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate.
        print(sys.exc_info())
        traceback.print_exc()
        logging.warning('something went wrong with memstat, disabling')
        _can_enable = False
        _memstat_enabled = False
    finally:
        # Necessary to avoid cyclic references and leak memory!
        del frame
        del _frame
0
def gc_callback(phase: str, info: Dict[str, Any]):
    """gc.callbacks hook: time each collection and report statistics.

    The collector invokes this with phase "start" before a collection and
    "stop" after it (see the gc module's callback protocol).
    """
    # pylint: disable=global-statement
    global _gc_start
    gc_start = _gc_start

    if phase == "start" and gc_start is None:
        _gc_start = time.time()
    elif phase == "stop" and gc_start is not None:
        # BUGFIX: the original branch was `elif gc_start is not None`, so a
        # second "start" callback while a start was pending logged a bogus
        # duration. Only a matching "stop" ends the measurement.
        duration = time.time() - gc_start
        _gc_start = None

        if node_stats_service.node is not None:
            node_stats_service.log_gc_duration(info["generation"], duration)
        gen0, gen1, gen2 = gc.get_count()
        if duration >= constants.GC_DURATION_WARN_THRESHOLD:
            logger.statistics({
                "type": "GcDurationExceededWarningThreshold",
                "start_time": datetime.fromtimestamp(time.time()),
                "duration": duration,
                "generation": info["generation"],
                "collected": info["collected"],
                "uncollectable": info["uncollectable"],
                "total_uncollectable": len(gc.garbage),
                "sizes": {
                    "generation0": gen0,
                    "generation1": gen1,
                    "generation2": gen2
                },
            })
    else:
        # Unpaired start/stop (e.g. "stop" with no recorded start).
        logger.debug("invalid state when attempting to track GC state skip")
Ejemplo n.º 47
0
    def __init__(self, *args, **kwargs):
        """Enable verbose GC debugging and tracemalloc, force a collection
        of every generation, and log any objects that could not be freed."""

        super().__init__(*args, **kwargs)

        # BUGFIX: the message has three %s placeholders, so the values must
        # be passed as three separate log arguments (as done for the
        # "Forcing collection..." call below); the original passed a single
        # 3-tuple, which breaks %-style message formatting.
        self.log(
            "Tweaking garbage collection. Is it currently turned on? %s\n"
            "Current thresholds: %s\n"
            "Current counts: %s\n"
            "Enabling GB debug output (check stderr)\n"
            "Enabling tracemalloc",
            str(gc.isenabled()), repr(gc.get_threshold()),
            repr(gc.get_count()))

        # Have the collector narrate its work on stderr.
        gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_COLLECTABLE
                     | gc.DEBUG_UNCOLLECTABLE)
        tracemalloc.start()

        for i in range(3):
            self.log("Forcing collection of generation %s...", str(i))
            gc.collect(i)

        # gc.garbage holds objects the collector found uncollectable.
        unclaimed = [
            'A total of %s objects that could not be freed:' % len(gc.garbage)
        ]

        for i in gc.garbage:
            unclaimed.append('%s: %s (%s)' % (type(i), str(i), repr(i)))

        self.log('\n'.join(unclaimed))
        self.log("Done.")
Ejemplo n.º 48
0
  def update_status_in_thread(self):
    """Refresh the status of every active cluster and reschedule itself.

    Runs on a reactor worker thread; the next run is scheduled from the
    main loop because reactor.callLater is not thread-safe.
    """
    # TODO: make sure performance is not a problem as current approach queries
    # database many times.
    """
    tasks = get_tasks_by_service(service_id)
    clusters = []
    for task in tasks:
      if task.job.cluster not in clusters:
        clusters.append(task.job.cluster)
    """
    logger.info("updating clusters status, "
        "%d task in queue, %d workers, %d total threads",
        reactor.getThreadPool().q.qsize(),
        len(reactor.getThreadPool().working),
        len(reactor.getThreadPool().threads))

    try:
      self.start_time = time.time()
      for cluster in Cluster.objects.filter(active=True).all():
        self.update_cluster_status(cluster)
      logger.info("spent %f seconds for updating clusters status",
          time.time() - self.start_time)
      logger.info("gc: %r", gc.get_count())
      logger.info("usage: %r", resource.getrusage(resource.RUSAGE_SELF))
    except Exception as e:
      # BUGFIX: the original message was "%Failed to update statu: %r" -
      # the stray leading "%F" is an invalid conversion that makes the
      # warning call itself fail while formatting (plus the typo).
      logger.warning("Failed to update status: %r", e)
    finally:
      # reactor.callLater is NOT thread-safe but reactor.callFromThread is, so we
      # put the callLater to the main loop.
      reactor.callFromThread(
        reactor.callLater, self.collector_config.period, self.update_status)
Ejemplo n.º 49
0
    def status(self, mess, args):
        """ If I am alive I should be able to respond to this one

        Returns per-plugin status codes (B=blacklisted, L=loaded,
        C=needs configuration, E=error), system load averages when
        available, and the GC generation counts.
        """
        all_blacklisted = self.get_blacklisted_plugin()
        all_loaded = get_all_active_plugin_names()
        all_attempted = sorted([p.name for p in self.all_candidates])
        plugins_statuses = []
        for name in all_attempted:
            if name in all_blacklisted:
                plugins_statuses.append(('B', name))
            elif name in all_loaded:
                plugins_statuses.append(('L', name))
            else:
                # Look the plugin object up once (the original called
                # get_plugin_obj_by_name twice per plugin).
                plugin = get_plugin_obj_by_name(name)
                if (plugin is not None
                        and plugin.get_configuration_template() is not None
                        and self.get_plugin_configuration(name) is None):
                    # Plugin exists and wants configuration it doesn't have.
                    plugins_statuses.append(('C', name))
                else:
                    plugins_statuses.append(('E', name))

        #noinspection PyBroadException
        try:
            from posix import getloadavg

            loads = getloadavg()
        except Exception as _:
            # Not all platforms expose load averages.
            loads = None
        return {
            'plugins_statuses': plugins_statuses,
            'loads': loads,
            'gc': gc.get_count()
        }
Ejemplo n.º 50
0
def stats():
    """Log the process's max RSS and GC generation counts, tagged with the
    caller's file and line.

    No-op unless memstat collection is enabled; disables itself permanently
    if gathering the statistics fails.
    """
    global _memstat_enabled, _can_enable
    if not _memstat_enabled:
        return
    if not _can_enable:
        logging.warning('could not enable memstat')
        _memstat_enabled = False
        return
    # Pre-bind so the deletes in the finally block are safe even when the
    # try body raises before the frames are assigned (the original raised
    # NameError in that case).
    _frame = frame = None
    try:
        s0, s1, s2 = gc.get_count()
        usage = resource.getrusage(resource.RUSAGE_SELF)
        kb = usage.ru_maxrss
        if kb == 0:
            # Some platforms report 0 for ru_maxrss; use the fallback.
            kb = my_getrss()
        _frame = inspect.currentframe()
        frame = _frame.f_back  # caller's frame: report its file and line
        fname = frame.f_code.co_filename
        fnum = frame.f_lineno
        logging.info('memstat:%s:%d: rss_kb: %d gb_stages: %d %d %d' % \
                     (fname, fnum, kb, s0, s1, s2))
    except Exception:
        # Catch Exception rather than a bare except so KeyboardInterrupt
        # and SystemExit still propagate.
        log_lines(sys.exc_info(), logging.debug)
        log_lines(traceback.format_exc(), logging.debug)
        logging.warning('something went wrong with memstat, disabling')
        _can_enable = False
        _memstat_enabled = False
    finally:
        # Necessary to avoid cyclic references and leak memory!
        del frame
        del _frame
Ejemplo n.º 51
0
 def _make_html_response(self, results, healthy):
     """Render the health-check results as an HTML page.

     Returns a (body, content_type) tuple built from
     HTML_RESPONSE_TEMPLATE.
     """
     try:
         hostname = socket.gethostname()
     except socket.error:
         hostname = None
     translated_results = []
     for result in results:
         translated_results.append({
             'details': result.details or '',
             'reason': result.reason,
             'class': reflection.get_class_name(result,
                                                fully_qualified=False),
         })
     params = {
         'healthy': healthy,
         'hostname': hostname,
         'results': translated_results,
         'detailed': self._show_details,
         'now': str(timeutils.utcnow()),
         'python_version': sys.version,
         'platform': platform.platform(),
         'gc': {
             'counts': gc.get_count(),
             'threshold': gc.get_threshold(),
         },
         'threads': self._get_threadstacks(),
         # BUGFIX: the greenthreads entry also called _get_threadstacks(),
         # duplicating the thread dump; use _get_greenstacks() to match
         # the JSON variant of this response.
         'greenthreads': self._get_greenstacks(),
     }
     body = _expand_template(self.HTML_RESPONSE_TEMPLATE, params)
     return (body.strip(), 'text/html')
Ejemplo n.º 52
0
 def __init__(self):
     """Build the lock-screen page.

     Lists users with unsaved work, reports printer problems, and - when
     the till is idle and configured for it - schedules an automatic exit.
     """
     ui.basicpage.__init__(self)
     self.addstr(1,1,"This till is locked.")
     self.updateheader()
     # _y presumably tracks the next row used by self.line() — confirm
     # against the class definition (not visible here).
     self._y=3
     # Every other registered page represents another user's session.
     unsaved=[p for p in ui.basicpage._pagelist if p!=self]
     if unsaved:
         self.line("The following users have unsaved work "
                   "on this terminal:")
         for p in unsaved:
             self.line("  {} ({})".format(p.pagename(),p.unsaved_data))
         self.line("")
     else:
         # The till is idle - schedule an exit if configured
         if tillconfig.idle_exit_code is not None:
             event.eventlist.append(self)
             # Honour both the minimum run time and the minimum time the
             # lock screen itself must stay up.
             self.nexttime = max(
                 tillconfig.start_time + tillconfig.minimum_run_time,
                 time.time() + tillconfig.minimum_lock_screen_time)
     # Report receipt/kitchen printer trouble both on screen and in the log.
     rpproblem=printer.driver.offline()
     if rpproblem:
         self.line("Receipt printer problem: {}".format(rpproblem))
         log.info("Receipt printer problem: %s",rpproblem)
     kpproblem=foodorder.kitchenprinter.offline()
     if kpproblem:
         self.line("Kitchen printer problem: {}".format(kpproblem))
         log.info("Kitchen printer problem: %s",kpproblem)
     self.addstr(self.h-1,0,"Till version: {}".format(version.version))
     self.move(0, 0)
     log.info("lockpage gc stats: %s, len(gc.garbage)=%d",gc.get_count(),
              len(gc.garbage))
Ejemplo n.º 53
0
 def _check_timers(self, eventtime, busy):
     """Run any due timers and return how long the reactor may sleep.

     Returns 0. when work was done (or more may be pending) and a positive
     delay, capped at 1 second, when nothing is due yet.
     """
     if eventtime < self._next_timer:
         # No timer is due yet.
         if busy:
             return 0.
         if self._check_gc:
             gi = gc.get_count()
             if gi[0] >= 700:
                 # Reactor looks idle and gc is due - run it
                 # Pick the deepest generation whose count is high enough.
                 gc_level = 0
                 if gi[1] >= 10:
                     gc_level = 1
                     if gi[2] >= 10:
                         gc_level = 2
                 self._last_gc_times[gc_level] = eventtime
                 gc.collect(gc_level)
                 return 0.
         return min(1., max(.001, self._next_timer - eventtime))
     # At least one timer is due: reset and recompute the next wake time
     # while dispatching callbacks.
     self._next_timer = self.NEVER
     g_dispatch = self._g_dispatch
     for t in self._timers:
         waketime = t.waketime
         if eventtime >= waketime:
             # Mark the timer inactive while its callback runs, then store
             # the next wake time the callback requests.
             t.waketime = self.NEVER
             t.waketime = waketime = t.callback(eventtime)
             if g_dispatch is not self._g_dispatch:
                 # The callback switched dispatch greenlets; stop
                 # processing timers in this (now stale) greenlet.
                 self._next_timer = min(self._next_timer, waketime)
                 self._end_greenlet(g_dispatch)
                 return 0.
         self._next_timer = min(self._next_timer, waketime)
     return 0.
Ejemplo n.º 54
0
 def _make_json_response(self, results, healthy):
     """Render the health-check results as a JSON document.

     Returns a (body, content_type) tuple; the payload carries full
     debug details only when detailed output is enabled.
     """
     if not self._show_details:
         # Terse form: just the failure reasons.
         body = {
             'reasons': [result.reason for result in results],
             'detailed': False,
         }
         return (self._pretty_json_dumps(body), 'application/json')

     body = {
         'detailed': True,
         'python_version': sys.version,
         'now': str(timeutils.utcnow()),
         'platform': platform.platform(),
         'gc': {
             'counts': gc.get_count(),
             'threshold': gc.get_threshold(),
         },
     }
     body['reasons'] = [{
         'reason': result.reason,
         'details': result.details or '',
         'class': reflection.get_class_name(result,
                                            fully_qualified=False),
     } for result in results]
     body['greenthreads'] = self._get_greenstacks()
     body['threads'] = self._get_threadstacks()
     return (self._pretty_json_dumps(body), 'application/json')
Ejemplo n.º 55
0
def get_gc_info():
    """Return a snapshot of the garbage collector's state.

    The dict reports whether collection is enabled, the generation
    thresholds and current counts, and the number of tracked objects.
    """
    import gc
    return {
        'is_enabled': gc.isenabled(),
        'thresholds': gc.get_threshold(),
        'counts': gc.get_count(),
        'obj_count': len(gc.get_objects()),
    }
Ejemplo n.º 56
0
    def closeEvent(self, event):
        """Handle the main-window close event: optionally dump GC debug
        info, persist preferences, then log completion.

        :param event: the QCloseEvent being processed
        """
        QtWidgets.QMainWindow.closeEvent(self, event)
        if ctrl.print_garbage:
            # import objgraph
            log.debug(f'garbage stats: {gc.get_count()}')
            gc.collect()
            log.debug(f'after collection: {gc.get_count()}')
            if gc.garbage:
                log.debug(f'garbage: {gc.garbage}')

                # objgraph.show_most_common_types(limit =40)
        if self.save_prefs:
            prefs.save_preferences()
        log.info('...done')
Ejemplo n.º 57
0
 def test_gc_create1(self):
     """GC-tracked record classes must leave no residue: the generation
     counts return to their baseline once the container is collected."""
     gc.collect()
     baseline = gc.get_count()
     A = make_class("A", fields=2, gc=True)
     B = make_class("B", varsize=True, gc=True)
     C = make_class("C", fields=2, varsize=True, gc=True)
     obj_a = A([], ())
     obj_c = C(1, 2, {1: 2, 3: 4}, [1, 2])
     obj_b = B(1, obj_a)
     holder = [obj_b, obj_c]
     after_alloc = gc.get_count()
     # All three instances must participate in cyclic GC.
     self.assertEqual(gc.is_tracked(obj_a), True)
     self.assertEqual(gc.is_tracked(obj_b), True)
     self.assertEqual(gc.is_tracked(obj_c), True)
     del holder
     gc.collect()
     self.assertEqual(baseline, gc.get_count())
Ejemplo n.º 58
0
    def _DrawDebugInfo(self,dtime):
        """Dump debug information (i.e. entity stats) to the upper right
        corner of the window"""

        # Lazily create the overlay font on first use.
        if not hasattr(self,"debug_info_font"):
            self.debug_info_font = FontCache.get(defaults.letter_height_debug_info,face=defaults.font_debug_info)

        if self.level:
            entity_count,entities_active,entities_visible,entities_nowindow = self.level.GetEntityStats()
        else:
            # No level loaded - show placeholder values.
            entity_count,entities_active,entities_visible,entities_nowindow = -1,-1,-1,-1

        drawables_global = len(Renderer.GetDrawables())
        fps = 1.0/dtime

        import gc
        gcc = gc.get_count()

        scaled_score = self.score/100

        # this is expensive, but we will survive it.
        # NOTE: the template below is filled from this locals() snapshot,
        # so the local variable names above are load-bearing - do not
        # rename them.
        locdef = locals().copy()
        locdef.update(defaults.__dict__)
        locdef.update(self.__dict__)


        text = """
EntitiesTotal:     {entity_count}
EntitiesActive:    {entities_active}
EntitiesVisible:   {entities_visible}
DrawCalls:         {draw_counter}
EntitiesNoWindow:  {entities_nowindow}
DrawablesGlobal:   {drawables_global}
GCCollections:     {gcc}
GodMode:           {debug_godmode}
UpDownMove:        {debug_updown_move}
PreventFallDown:   {debug_prevent_fall_down}
ShowBoundingBoxes: {debug_draw_bounding_boxes}
ScrollBoth:        {debug_scroll_both}
ScrollSpeed:       {move_map_speed}
SpeedScale/Round:  {speed_scale_per_round}
SpeedScale:        {speed_scale}
LevelSize:         {level_size}

TimeDelta:         {dtime:.4}
1/TimeDelta:       {fps:.4}

MoneyAmount:       {scaled_score} $

""".format(**locdef)

        # Render the text block at a fixed offset from the right edge.
        s = sf.String(text,Font=self.debug_info_font,\
            Size=defaults.letter_height_debug_info)

        s.SetPosition(defaults.resolution[0]-302,140)
        s.SetColor(sf.Color.White)
        self.DrawSingle(s)
Ejemplo n.º 59
0
    def GET(self):
        """Render a plain-text, auto-refreshing server status page:
        process-wide resource usage followed by one section per virtual
        host and one line per in-flight request."""
        web.header('Content-Type','text/plain', unique=True)
        # Ask the browser to re-poll every 3 seconds.
        web.header('Refresh','3')
        s = []
        # Name the fields of the getrusage() result tuple.
        resources = dict(zip(
                ('utime', 'stime', 'maxrss', 'ixrss','idrss','isrss','minflt','majflt','nswap','inblock','outblock','msgsnd','msgrcv','nsignals','nvcsw','nivcsw'),
                resource.getrusage(resource.RUSAGE_SELF)))

        pid, pagesize = os.getpid(), resource.getpagesize()

        # NOTE(review): sm is converted below but never shown in the header
        # text - confirm whether shared memory was meant to be reported.
        vm, rm, sm = meminfo.memory()
        gc0, gc1, gc2 = gc.get_count()
        s.append(
        '''######## PID:%(pid)i  total events:%(event_counter)i  python objects, unreachable:%(gc_unreachable)i total:%(gc_objects)i dirty:%(gc0)i/%(gc1)i/%(gc2)i file descriptors:%(file_descriptors)i/%(max_descriptors)i \n'''
        '''######## virt memory:%(vm).0fMiB  res memory:%(rm).0fMiB  sys cpu time:%(sys).3fs  user:%(user).3fs context switches, voluntary:%(vcs)i  involuntary:%(ics)i \n''' %
            {
                'pid':pid,
                'event_counter':server.event_counter,
                'gc0': gc0,
                'gc1': gc1,
                'gc2': gc2,
                'gc_unreachable': len(gc.garbage),
                'gc_objects':len(gc.get_objects()),
                # Count open fds via /proc (Linux-specific).
                'file_descriptors':len(os.listdir('/proc/%i/fd' % pid)),
                'max_descriptors':resource.getrlimit(resource.RLIMIT_NOFILE)[1],
                'vm': vm/1048576.0,
                'rm': rm/1048576.0,
                'sm': sm/1048576.0,
                'vcs':resources['nvcsw'],
                'ics':resources['nivcsw'],
                'sys':resources['stime'],
                'user':resources['utime'],
            },
        )

        # One header per virtual host, then one line per active request.
        for vhostdata in server.vhosts:
            s.append('''    **** %(host)s:%(port)i connections:%(counter)i broken:%(broken)i cpu_time:%(cpu_time).3fs ****\n''' % vhostdata)

            for clock, req in vhostdata['requests'].items():
                s.append('''        %(host)s:%(port)5s "%(method)s %(url)s %(http)s" %(status_code)3i %(content_length)8i/%(chunks)i/%(context_switches)i  (%(cpu_time).3fms)\n''' % {
                    'host':req.environ['REMOTE_HOST'],
                    'port':req.environ['REMOTE_PORT'],
                    'method': req.environ['REQUEST_METHOD'],
                    'url': req.get_url(),
                    'http': req.environ['SERVER_PROTOCOL'],
                    'status_code': req.out_dict['code'],
                    'content_length':req.content_length,
                    'chunks':req.chunks_number,
                    'context_switches':req.context_switches,
                    'cpu_time':req.all_cpu_time * 1000.0,
                })

            s.append('\n\n')

        # How long this page itself took to render.
        s.append("took %.3fms\n" % ((time.time()-web.ctx.environ['wsgi.now']())*1000.0, ))
        return ''.join(s)