Example No. 1
def test_set_threshold():
    #the method has three arguments
    gc.set_threshold(0,-2,2)
    result = gc.get_threshold()
    AreEqual(result[0],0)
    AreEqual(result[1],-2)
    AreEqual(result[2],2)
    
    #the method has two arguments
    gc.set_threshold(0,128)
    result = gc.get_threshold()
    AreEqual(result[0],0)
    AreEqual(result[1],128)
    #CodePlex Work Item 8523
    #AreEqual(result[2],2)
    
   
    #the method has only one argument
    gc.set_threshold(-10009)
    result= gc.get_threshold()
    AreEqual(result[0],-10009)
    #CodePlex Work Item 8523
    #AreEqual(result[1],128)
    #AreEqual(result[2],2)
    
    #the argument is a random int
    for i in xrange(1,65535,6):
        gc.set_threshold(i)
        result = gc.get_threshold()
        AreEqual(result[0],i)
Example No. 2
    def test_set_threshold(self):
        """get_threshold, set_threshold"""

        #the method has three arguments
        gc.set_threshold(0,-2,2)
        result = gc.get_threshold()
        self.assertEqual(result[0],0)
        self.assertEqual(result[1],-2)
        self.assertEqual(result[2],2)
        
        #the method has two arguments
        gc.set_threshold(0,128)
        result = gc.get_threshold()
        self.assertEqual(result[0],0)
        self.assertEqual(result[1],128)
        #CodePlex Work Item 8523
        self.assertEqual(result[2],2)
        
    
        #the method has only one argument
        gc.set_threshold(-10009)
        result= gc.get_threshold()
        self.assertEqual(result[0],-10009)
        #CodePlex Work Item 8523
        self.assertEqual(result[1],128)
        self.assertEqual(result[2],2)
        
        #the argument is a random int
        for i in xrange(1,65535,6):
            gc.set_threshold(i)
            result = gc.get_threshold()
            self.assertEqual(result[0],i)
        
        #an argument is a float
        #CodePlex Work Item 8522
        self.assertRaises(TypeError,gc.set_threshold,2.1)
        self.assertRaises(TypeError,gc.set_threshold,3,-1.3)
        
        #an argument is a string
        #CodePlex Work Item 8522
        self.assertRaises(TypeError,gc.set_threshold,"1")
        self.assertRaises(TypeError,gc.set_threshold,"str","xdv#4")
        self.assertRaises(TypeError,gc.set_threshold,2,"1")
        self.assertRaises(TypeError,gc.set_threshold,31,-123,"asdfasdf","1")
        
        #an argument is an object
        #CodePlex Work Item 8522
        o  = object()
        o2 = object()
        self.assertRaises(TypeError,gc.set_threshold,o)
        self.assertRaises(TypeError,gc.set_threshold,o,o2)
        self.assertRaises(TypeError,gc.set_threshold,1,-123,o)
        o  = _random.Random()
        o2 = _random.Random()
        self.assertRaises(TypeError,gc.set_threshold,o)
        self.assertRaises(TypeError,gc.set_threshold,o,o2)
        self.assertRaises(TypeError,gc.set_threshold,8,64,o)
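The assertions above exercise CPython's documented contract for gc.set_threshold(threshold0[, threshold1[, threshold2]]): it accepts one to three integers and rejects anything else with a TypeError. A minimal sketch of the same contract, restoring the original values afterwards:

import gc

saved = gc.get_threshold()
try:
    gc.set_threshold(700)            # change only generation 0
    gc.set_threshold(700, 10)        # generations 0 and 1
    gc.set_threshold(700, 10, 10)    # all three generations
    try:
        gc.set_threshold(2.1)        # non-integer arguments raise TypeError
    except TypeError:
        pass
finally:
    gc.set_threshold(*saved)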
Example No. 3
    def check_gc_during_creation(self, makeref):
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass

        def callback(*args):
            pass

        referenced = A()

        a = A()
        a.a = a
        a.wr = makeref(referenced)

        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)
            if test_support.due_to_ironpython_incompatibility():
                keepalive(referenced)

        finally:
            gc.set_threshold(*thresholds)
Example No. 4
    def check_gc_during_creation(self, makeref):
        if test_support.check_impl_detail():
            import gc
            thresholds = gc.get_threshold()
            gc.set_threshold(1, 1, 1)
        gc_collect()
        class A:
            pass

        def callback(*args):
            pass

        referenced = A()

        a = A()
        a.a = a
        a.wr = makeref(referenced)

        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)

        finally:
            if test_support.check_impl_detail():
                gc.set_threshold(*thresholds)
Example No. 5
 def _make_json_response(self, results, healthy):
     if self._show_details:
         body = {
             'detailed': True,
             'python_version': sys.version,
             'now': str(timeutils.utcnow()),
             'platform': platform.platform(),
             'gc': {
                 'counts': gc.get_count(),
                 'threshold': gc.get_threshold(),
             },
         }
         reasons = []
         for result in results:
             reasons.append({
                 'reason': result.reason,
                 'details': result.details or '',
                 'class': reflection.get_class_name(result,
                                                    fully_qualified=False),
             })
         body['reasons'] = reasons
         body['greenthreads'] = self._get_greenstacks()
         body['threads'] = self._get_threadstacks()
     else:
         body = {
             'reasons': [result.reason for result in results],
             'detailed': False,
         }
     return (self._pretty_json_dumps(body), 'application/json')
Example No. 6
def main():
    a = 4
    b = 5

    c_list = []
    c_list.append(123)
    c_list.append(456)
    # reference cycle
    c_list.append(c_list)
    c_list[2].append(789)

    # foo = ['hi']
    # c_list = foo

    print(c_list)

    print("Stats: {}".format(gc.get_stats()))
    print("Count: {}".format(gc.get_count()))
    print("GC enabled: {}".format(gc.isenabled()))
    print("Threshold: {}".format(gc.get_threshold()))
    print("c_list is tracked: {}".format(gc.is_tracked(c_list)))

    """
    The count returned is generally one higher than you might expect,
    because it includes the (temporary) reference as an argument to getrefcount().
    """
    print("Reference count for c_list: {}".format(sys.getrefcount(c_list)))
    del c_list[2]
    print("Reference count for c_list: {}".format(sys.getrefcount(c_list)))

    print("Collecting: {}".format(gc.collect()))

    print("Done.")
Example No. 7
def setUp(test):
    test.globs["print_function"] = print_function
    test.globs["saved-sys-info"] = (sys.path[:], sys.argv[:], sys.modules.copy())
    if hasattr(gc, "get_threshold"):
        test.globs["saved-gc-threshold"] = gc.get_threshold()
    test.globs["this_directory"] = os.path.split(__file__)[0]
    test.globs["testrunner_script"] = sys.argv[0]
Example No. 8
    def check_gc_during_creation(self, makeref):
        # gc.get/set_threshold does not exist e.g. in pypy
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass

        def callback(*args):
            pass

        referenced = A()

        a = A()
        a.a = a
        a.wr = makeref(referenced)

        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)

        finally:
            gc.set_threshold(*thresholds)
Example No. 9
 def _make_html_response(self, results, healthy):
     try:
         hostname = socket.gethostname()
     except socket.error:
         hostname = None
     translated_results = []
     for result in results:
         translated_results.append({
             'details': result.details or '',
             'reason': result.reason,
             'class': reflection.get_class_name(result,
                                                fully_qualified=False),
         })
     params = {
         'healthy': healthy,
         'hostname': hostname,
         'results': translated_results,
         'detailed': self._show_details,
         'now': str(timeutils.utcnow()),
         'python_version': sys.version,
         'platform': platform.platform(),
         'gc': {
             'counts': gc.get_count(),
             'threshold': gc.get_threshold(),
         },
         'threads': self._get_threadstacks(),
         'greenthreads': self._get_greenstacks(),
     }
     body = _expand_template(self.HTML_RESPONSE_TEMPLATE, params)
     return (body.strip(), 'text/html')
Example No. 10
    def check_gc_during_creation(self, makeref):
        # XXX: threshold not applicable to Jython
        if not test_support.is_jython:
            thresholds = gc.get_threshold()
            gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass

        def callback(*args):
            pass

        referenced = A()

        a = A()
        a.a = a
        a.wr = makeref(referenced)

        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)

        finally:
            # XXX: threshold not applicable to Jython
            if not test_support.is_jython:
                gc.set_threshold(*thresholds)
Example No. 11
def gc_callback(option, opt, GC_THRESHOLD, *args):
    import gc
    if GC_THRESHOLD == 0:
        gc.disable()
        print "gc disabled"
    else:
        gc.set_threshold(GC_THRESHOLD)
        print "gc threshold:", gc.get_threshold()
Example No. 12
	def get_gc_threshold(self):
		"""
		Return the garbage collector's current collection thresholds.

		@return: the (threshold0, threshold1, threshold2) tuple from gc.get_threshold()
		@rtype: tuple
		"""
		return gc.get_threshold()
Example No. 13
def get_gc_info():
    import gc
    ret = {}
    ret['is_enabled'] = gc.isenabled()
    ret['thresholds'] = gc.get_threshold()
    ret['counts'] = gc.get_count()
    ret['obj_count'] = len(gc.get_objects())
    return ret
Example No. 14
def main():
	parser=argparse.ArgumentParser(description="access to pythons built-in garbage collector")
	parser.add_argument("command",help="what to do",choices=["enable","disable","status","collect","threshold","debug","break"],action="store")
	parser.add_argument("args",help="argument for command",action="store",nargs="*")
	ns=parser.parse_args()
	if ns.command=="enable":
		gc.enable()
	elif ns.command=="disable":
		gc.disable()
	elif ns.command=="collect":
		gc.collect()
	elif ns.command=="status":
		print "GC enabled:              {s}".format(s=gc.isenabled())
		tracked=gc.get_objects()
		n=len(tracked)
		print "Tracked objects:         {n}".format(n=n)
		size=sum([sys.getsizeof(e) for e in tracked])
		del tracked#this list may be big, better delete it
		print "Size of tracked objects: {s} bytes".format(s=size)
		print "Garbage:                 {n}".format(n=len(gc.garbage))
		gsize=sum([sys.getsizeof(e) for e in gc.garbage])
		print "Size of garbage:         {s} bytes".format(s=gsize)
		print "Debug:                   {d}".format(d=gc.get_debug())
	elif ns.command=="threshold":
		if len(ns.args)==0:
			print "Threshold:\n   G1: {}\n   G2: {}\n   G3: {}".format(*gc.get_threshold())
		elif len(ns.args)>3:
			print "Error: to many arguments for threshold!"
			sys.exit(1)
		else:
			try:
				ts=tuple([int(e) for e in ns.args])
			except ValueError:
				print "Error: expected arguments to be integer!"
				sys.exit(1)
			gc.set_threshold(*ts)
	elif ns.command=="debug":
		if len(ns.args)==0:
			print "Debug: {d}".format(d=gc.get_debug())
		elif len(ns.args)==1:
			try:
				flag=int(ns.args[0])
			except ValueError:
				print "Error: expected argument to be an integer!"
				sys.exit(1)
			gc.set_debug(flag)
		else:
			print "Error: expected exactly one argument for threshold!"
			sys.exit(1)
	elif ns.command=="break":
		if len(gc.garbage)==0:
			print "Error: No Garbage found!"
			sys.exit(1)
		else:
			for k in dir(gc.garbage[0]):
				try: delattr(gc.garbage[0],k)
				except: pass
			del gc.garbage[:]
Example No. 15
 def __init__(self, parent=None, debug=False):
     super(GarbageCollector, self).__init__(parent)
     self.debug = debug
     self.timer = QTimer()
     self.timer.timeout.connect(self.check)
     self.threshold = gc.get_threshold()
     # gc.set_debug(gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_OBJECTS)
     gc.disable()
     self.timer.start(self.INTERVAL)
Example No. 16
    def __init__(self):
        QtCore.QObject.__init__(self, None)

        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.check)

        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(self.INTERVAL)
Example No. 17
def setUp(test):
    test.globs['saved-sys-info'] = (
        sys.path[:],
        sys.argv[:],
        sys.modules.copy(),
        gc.get_threshold(),
        )
    test.globs['this_directory'] = os.path.split(__file__)[0]
    test.globs['testrunner_script'] = sys.argv[0]
Example No. 18
def main():
    print "Garbage collection thresholds: %r,%r,%r" % gc.get_threshold()
    collected = gc.collect()
    print "Garbage collector: collected %d objects." % (collected)
    print "Creating cycles..."
    for i in range(10000):
        make_cycle()
    collected = gc.collect()
    print "Garbage collector: collected %d objects." % (collected)
Example No. 19
def get_gc_info():
    import gc
    ret = {}
    ret['is_enabled'] = gc.isenabled()
    ret['thresholds'] = gc.get_threshold()

    ret['counts'] = glom(gc, T.get_count(), skip_exc=Exception)
    ret['obj_count'] = glom(gc, T.get_objects().__len__(), skip_exc=Exception)

    return ret
Example No. 20
    def __init__(self, parent, debug=False):
        QObject.__init__(self, parent)
        self.debug = debug

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.check)

        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(self.INTERVAL)
Example No. 21
    def __init__(self, interval=1.0, debug=False):
        self.debug = debug
        if debug:
            gc.set_debug(gc.DEBUG_LEAK)

        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.check)

        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(interval * 1000)
Example No. 22
 def setUp(test):
     test.globs['saved-sys-info'] = (
         sys.path[:],
         sys.argv[:],
         sys.modules.copy(),
         gc.get_threshold(),
         )
     test.globs['this_directory'] = os.path.split(__file__)[0]
     test.globs['testrunner_script'] = __file__
     test.globs['get_basenames_from_suite'] = get_basenames_from_suite
     test.globs['print_file'] = print_file
Example No. 23
 def test_implicit_parent_with_threads(self):
     if not gc.isenabled():
         return # cannot test with disabled gc
     N = gc.get_threshold()[0]
     if N < 50:
         return # cannot test with such a small N
     def attempt():
         lock1 = threading.Lock()
         lock1.acquire()
         lock2 = threading.Lock()
         lock2.acquire()
         recycled = [False]
         def another_thread():
             lock1.acquire() # wait for gc
             greenlet.getcurrent() # update ts_current
             lock2.release() # release gc
         t = threading.Thread(target=another_thread)
         t.start()
         class gc_callback(object):
             def __del__(self):
                 lock1.release()
                 lock2.acquire()
                 recycled[0] = True
         class garbage(object):
             def __init__(self):
                 self.cycle = self
                 self.callback = gc_callback()
         l = []
         x = range(N*2)
         current = greenlet.getcurrent()
         g = garbage()
         for i in x:
             g = None # lose reference to garbage
             if recycled[0]:
                 # gc callback called prematurely
                 t.join()
                 return False
             last = greenlet()
             if recycled[0]:
                 break # yes! gc called in green_new
             l.append(last) # increase allocation counter
         else:
             # gc callback not called when expected
             gc.collect()
             if recycled[0]:
                 t.join()
             return False
         self.assertEqual(last.parent, current)
         for g in l:
             self.assertEqual(g.parent, current)
         return True
     for i in range(5):
         if attempt():
             break
Example No. 24
def setUp(test):
    test.globs['print_function'] = print_function
    test.globs['saved-sys-info'] = (
        sys.path[:],
        sys.argv[:],
        sys.modules.copy(),
    )
    if hasattr(gc, 'get_threshold'):
        test.globs['saved-gc-threshold'] = gc.get_threshold()
    test.globs['this_directory'] = os.path.split(__file__)[0]
    test.globs['testrunner_script'] = sys.argv[0]
Example No. 25
 def update_gc_stat(self):
   try:
     c1, c2, c3 = gc.get_count()
     t1, t2, t3 = gc.get_threshold()
     self.g1_count.update(c1)
     self.g2_count.update(c2)
     self.g3_count.update(c3)
     self.g1_threshold.update(t1)
     self.g2_threshold.update(t2)
     self.g3_threshold.update(t3)
   except Exception as e:
     Log.error(traceback.format_exc(e))
Example No. 26
 def collect_garbage(self):
     (count0, count1, count2) = gc.get_count()
     (threshold0, threshold1, threshold2) = gc.get_threshold()
     object_count = len(gc.get_objects())
     referrers_count = len(gc.get_referrers())
     referents_count = len(gc.get_referents())
     self.registry.gauge("python.gc.collection.count0").set_value(count0)
     self.registry.gauge("python.gc.collection.count1").set_value(count1)
     self.registry.gauge("python.gc.collection.count2").set_value(count2)
     self.registry.gauge("python.gc.objects.count").set_value(object_count)
     self.registry.gauge("python.gc.referrers.count").set_value(referrers_count)
     self.registry.gauge("python.gc.referents.count").set_value(referents_count)
Example No. 27
    def __init__(self, parent: QObject, debug: bool = False,
                 interval_ms=10000) -> None:
        super().__init__(parent)
        self.debug = debug
        self.interval_ms = interval_ms

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.check)

        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(self.interval_ms)
Example No. 28
    def global_setup(self):
        self.old_threshold = gc.get_threshold()

        if self.threshold[0]:
            self.runner.options.output.info(
                "Cyclic garbage collection threshold set to: %s" %
                repr(tuple(self.threshold)))
        else:
            self.runner.options.output.info(
                "Cyclic garbage collection is disabled.")

        gc.set_threshold(*self.threshold)
Example No. 29
    def f(*args, **kwargs):
        now = reactor.seconds()
        num_pending = 0

        # _newTimedCalls is one long list of *all* pending calls. Below loop
        # is based off of impl of reactor.runUntilCurrent
        for delayed_call in reactor._newTimedCalls:
            if delayed_call.time > now:
                break

            if delayed_call.delayed_time > 0:
                continue

            num_pending += 1

        num_pending += len(reactor.threadCallQueue)
        start = time.time()
        ret = func(*args, **kwargs)
        end = time.time()

        # record the amount of wallclock time spent running pending calls.
        # This is a proxy for the actual amount of time between reactor polls,
        # since about 25% of time is actually spent running things triggered by
        # I/O events, but that is harder to capture without rewriting half the
        # reactor.
        tick_time.observe(end - start)
        pending_calls_metric.observe(num_pending)

        # Update the time we last ticked, for the metric to test whether
        # Synapse's reactor has frozen
        global last_ticked
        last_ticked = end

        if running_on_pypy:
            return ret

        # Check if we need to do a manual GC (since it's been disabled), and do
        # one if necessary.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        for i in (2, 1, 0):
            if threshold[i] < counts[i]:
                logger.info("Collecting gc %d", i)

                start = time.time()
                unreachable = gc.collect(i)
                end = time.time()

                gc_time.labels(i).observe(end - start)
                gc_unreachable.labels(i).set(unreachable)

        return ret
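The comment above describes the manual-collection pattern this wrapper uses: with automatic collection disabled, each reactor tick compares gc.get_count() against gc.get_threshold() and collects any generation that is over budget. A minimal standalone sketch of just that check; the log callback is a hypothetical stand-in for the metrics and logging calls used above:

import gc
import time

def maybe_collect(log=print):
    # Compare the per-generation allocation counters against the thresholds
    # and manually collect each generation that has exceeded its budget,
    # oldest generation first, mirroring the loop in the example above.
    threshold = gc.get_threshold()
    counts = gc.get_count()
    for gen in (2, 1, 0):
        if counts[gen] > threshold[gen]:
            start = time.time()
            unreachable = gc.collect(gen)
            log("collected generation %d in %.4fs (%d unreachable objects)"
                % (gen, time.time() - start, unreachable))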
Example No. 30
 def _adjustGcThreshold(self, task):
     numGarbage = GarbageReport.checkForGarbageLeaks()
     if numGarbage == 0:
         self.gcNotify.debug('no garbage found, doubling gc threshold')
         a, b, c = gc.get_threshold()
         gc.set_threshold(min(a * 2, 1 << 30), b, c)
         task.delayTime = task.delayTime * 2
         retVal = Task.again
     else:
         self.gcNotify.warning('garbage found, reverting gc threshold')
         gc.set_threshold(*self._gcDefaultThreshold)
         retVal = Task.done
     return retVal
Example No. 31
def CollectGarbage():
    import gc
    #gc.set_debug(gc.DEBUG_SAVEALL)
    #gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    from pprint import pprint
    print "threshold:", gc.get_threshold()
    print "unreachable object count:", gc.collect()
    garbageList = gc.garbage[:]
    for i, obj in enumerate(garbageList):
        print "Object Num %d:" % i
        pprint(obj)
        #print "Referrers:"
        #print(gc.get_referrers(o))
        #print "Referents:"
        #print(gc.get_referents(o))
    print "Done."
Example No. 32
def test_gc(margin, func, gc_func):
    def gc_callback(phase, info):
        if phase == 'start':
            gc_func()

    garbage = []
    garbage_append = garbage.append
    gc.collect()
    gc.callbacks.append(gc_callback)
    try:
        remaining = gc.get_threshold()[0] - gc.get_count()[0]
        for _ in range(remaining - margin):
            garbage_append(Garbage())
        func()
    finally:
        gc.callbacks.pop()
Example No. 33
    def f(*args, **kwargs):
        now = reactor.seconds()
        num_pending = 0

        # _newTimedCalls is one long list of *all* pending calls. Below loop
        # is based off of impl of reactor.runUntilCurrent
        for delayed_call in reactor._newTimedCalls:
            if delayed_call.time > now:
                break

            if delayed_call.delayed_time > 0:
                continue

            num_pending += 1

        num_pending += len(reactor.threadCallQueue)
        start = time.time() * 1000
        ret = func(*args, **kwargs)
        end = time.time() * 1000

        # record the amount of wallclock time spent running pending calls.
        # This is a proxy for the actual amount of time between reactor polls,
        # since about 25% of time is actually spent running things triggered by
        # I/O events, but that is harder to capture without rewriting half the
        # reactor.
        tick_time.inc_by(end - start)
        pending_calls_metric.inc_by(num_pending)

        if running_on_pypy:
            return ret

        # Check if we need to do a manual GC (since it's been disabled), and do
        # one if necessary.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        for i in (2, 1, 0):
            if threshold[i] < counts[i]:
                logger.info("Collecting gc %d", i)

                start = time.time() * 1000
                unreachable = gc.collect(i)
                end = time.time() * 1000

                gc_time.inc_by(end - start, i)
                gc_unreachable.inc_by(unreachable, i)

        return ret
Example No. 34
    def __init__(self, config, hasOwnerView=False):
        BaseDOManager.__init__(self, hasOwnerView)

        self.netSys = NetworkSystem()
        self.netCallbacks = NetworkCallbacks()
        self.netCallbacks.setCallback(self.__handleNetCallback)
        self.connected = False
        self.connectionHandle = None
        self.connectSuccessCallback = None
        self.connectFailureCallback = None

        self.msgType = 0

        self.config = config

        # Accept this hook so that we can respond to lost-connection
        # events in the main thread, instead of within the network
        # thread (if there is one).
        self.accept(self._getLostConnectionEvent(), self.lostConnection)

        # This DatagramIterator is constructed once, and then re-used
        # each time we read a datagram.
        self.private__di = PyDatagramIterator()

        self._serverAddress = ''

        self.readerPollTaskObj = None

        if self.config.GetBool('gc-save-all', 1):
            # set gc to preserve every object involved in a cycle, even ones that
            # would normally be freed automatically during garbage collect
            # allows us to find and fix these cycles, reducing or eliminating the
            # need to run garbage collects
            # garbage collection CPU usage is O(n), n = number of Python objects
            gc.set_debug(gc.DEBUG_SAVEALL)

        if self.config.GetBool('want-garbage-collect-task', 1):
            # manual garbage-collect task
            taskMgr.add(self._garbageCollect, self.GarbageCollectTaskName, 200)
            # periodically increase gc threshold if there is no garbage
            taskMgr.doMethodLater(
                self.config.GetFloat('garbage-threshold-adjust-delay',
                                     5 * 60.), self._adjustGcThreshold,
                self.GarbageThresholdTaskName)

        self._gcDefaultThreshold = gc.get_threshold()
Example No. 35
 def __init__(self, connectMethod, config, hasOwnerView = False, threadedNet = None):
     if threadedNet is None:
         threadedNet = config.GetBool('threaded-net', False)
     
     CConnectionRepository.__init__(self, hasOwnerView, threadedNet)
     self.setWantMessageBundling(config.GetBool('want-message-bundling', 1))
     DoInterestManager.__init__(self)
     DoCollectionManager.__init__(self)
     self.setPythonRepository(self)
     self.uniqueId = hash(self)
     self.accept(self._getLostConnectionEvent(), self.lostConnection)
     self.config = config
     if self.config.GetBool('verbose-repository'):
         self.setVerbose(1)
     
     userConnectMethod = self.config.GetString('connect-method', 'default')
     if userConnectMethod == 'http':
         connectMethod = self.CM_HTTP
     elif userConnectMethod == 'net':
         connectMethod = self.CM_NET
     elif userConnectMethod == 'native':
         connectMethod = self.CM_NATIVE
     
     self.connectMethod = connectMethod
     if self.connectMethod == self.CM_HTTP:
         self.notify.info("Using connect method 'http'")
     elif self.connectMethod == self.CM_NET:
         self.notify.info("Using connect method 'net'")
     elif self.connectMethod == self.CM_NATIVE:
         self.notify.info("Using connect method 'native'")
     
     self.connectHttp = None
     self.http = None
     self.private__di = PyDatagramIterator()
     self.recorder = None
     self.readerPollTaskObj = None
     self.dcSuffix = ''
     self._serverAddress = ''
     if self.config.GetBool('gc-save-all', 1):
         gc.set_debug(gc.DEBUG_SAVEALL)
     
     if self.config.GetBool('want-garbage-collect-task', 1):
         taskMgr.add(self._garbageCollect, self.GarbageCollectTaskName, 200)
         taskMgr.doMethodLater(self.config.GetFloat('garbage-threshold-adjust-delay', 5 * 60.0), self._adjustGcThreshold, self.GarbageThresholdTaskName)
     
     self._gcDefaultThreshold = gc.get_threshold()
Example No. 36
    def collect_metrics(self):
        u = resource.getrusage(resource.RUSAGE_SELF)
        g = None  # stays None when the collector is disabled
        if gc_.isenabled():
            c = list(gc_.get_count())
            th = list(gc_.get_threshold())
            g = GC(collect0=c[0] if not self.last_collect else c[0] - self.last_collect[0],
                   collect1=c[1] if not self.last_collect else c[
                       1] - self.last_collect[1],
                   collect2=c[2] if not self.last_collect else c[
                       2] - self.last_collect[2],
                   threshold0=th[0],
                   threshold1=th[1],
                   threshold2=th[2])

        thr = threading.enumerate()
        daemon_threads = [tr.daemon is True for tr in thr].count(True)
        alive_threads = [tr.daemon is False for tr in thr].count(True)
        dummy_threads = [type(tr) is threading._DummyThread for tr in thr].count(True)

        m = Metrics(ru_utime=u[0] if not self.last_usage else u[0] - self.last_usage[0],
                    ru_stime=u[1] if not self.last_usage else u[1] - self.last_usage[1],
                    ru_maxrss=u[2],
                    ru_ixrss=u[3],
                    ru_idrss=u[4],
                    ru_isrss=u[5],
                    ru_minflt=u[6] if not self.last_usage else u[6] - self.last_usage[6],
                    ru_majflt=u[7] if not self.last_usage else u[7] - self.last_usage[7],
                    ru_nswap=u[8] if not self.last_usage else u[8] - self.last_usage[8],
                    ru_inblock=u[9] if not self.last_usage else u[9] - self.last_usage[9],
                    ru_oublock=u[10] if not self.last_usage else u[10] - self.last_usage[10],
                    ru_msgsnd=u[11] if not self.last_usage else u[11] - self.last_usage[11],
                    ru_msgrcv=u[12] if not self.last_usage else u[12] - self.last_usage[12],
                    ru_nsignals=u[13] if not self.last_usage else u[13] - self.last_usage[13],
                    ru_nvcs=u[14] if not self.last_usage else u[14] - self.last_usage[14],
                    ru_nivcsw=u[15] if not self.last_usage else u[15] - self.last_usage[15],
                    alive_threads=alive_threads,
                    dummy_threads=dummy_threads,
                    daemon_threads=daemon_threads,
                    gc=g)

        self.last_usage = u
        if gc_.isenabled():
            self.last_collect = c

        return m
Example No. 37
    def __init__(self, interval=1.0, debug=False):
        """
        Initializes garbage collector

        @param interval float: timeout interval in seconds. Default: 1s
        @param debug bool: debug output. Default: False
        """
        super().__init__()
        self.debug = debug
        if debug:
            gc.set_debug(gc.DEBUG_LEAK)

        self.timer = QTimer()
        self.timer.timeout.connect(self.check)

        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(interval * 1000)
Example No. 38
 def _adjustGcThreshold(self, task):
     # do an unconditional collect to make sure gc.garbage has a chance to be
     # populated before we start increasing the auto-collect threshold
     # don't distribute the leak check from the client to the AI, they both
     # do these garbage checks independently over time
     numGarbage = GarbageReport.checkForGarbageLeaks()
     if numGarbage == 0:
         self.gcNotify.debug('no garbage found, doubling gc threshold')
         a, b, c = gc.get_threshold()
         gc.set_threshold(min(a * 2, 1 << 30), b, c)
         task.delayTime = task.delayTime * 2
         retVal = Task.again
     else:
         self.gcNotify.warning('garbage found, reverting gc threshold')
         # the process is producing garbage, stick to the default collection threshold
         gc.set_threshold(*self._gcDefaultThreshold)
         retVal = Task.done
     return retVal
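The comments above spell out the adaptive strategy: while no leaked garbage is found, the generation-0 threshold keeps doubling (capped at 1 << 30) so collections become rarer, and as soon as garbage does appear the saved default thresholds are restored. A minimal sketch of the same idea outside the task framework; found_garbage is a hypothetical placeholder for a leak check such as GarbageReport.checkForGarbageLeaks():

import gc

_default_threshold = gc.get_threshold()

def adjust_gc_threshold(found_garbage):
    # Double the generation-0 threshold while the process stays garbage-free
    # (capped at 1 << 30); revert to the saved defaults once garbage appears.
    if not found_garbage:
        g0, g1, g2 = gc.get_threshold()
        gc.set_threshold(min(g0 * 2, 1 << 30), g1, g2)
        return True    # keep adjusting on later runs
    gc.set_threshold(*_default_threshold)
    return False       # garbage found, stop adjusting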
Example No. 39
def f3():
    print("----0----")
    # print(gc.collect())
    c1 = classA()
    c2 = classA()
    c1.t = c2
    c2.t = c1
    print("----1----")
    del c1
    del c2
    print("----2----")
    print(gc.garbage)
    print("----3----")
    print(gc.collect())  # explicit collection
    print("----4----")
    print(gc.garbage)
    print("----5----")
    print(gc.get_threshold())
Example No. 40
    def init(self):
        self.log("""Tweaking garbage collection. Is it currently turned on? %s
Current thresholds: %s
Current counts: %s
Enabling GC debug output (check stderr)""" % (str(gc.isenabled()), repr(gc.get_threshold()), repr(gc.get_count())))
        #self.log("Enabling GC debug output (check stderr)")
        gc.set_debug(gc.DEBUG_STATS)
        self.log("Forcing collection of generation 0...")
        gc.collect(0)
        self.log("Forcing collection of generation 1...")
        gc.collect(1)
        self.log("Forcing collection of generation 2...")
        gc.collect(2)
        unclaimed = ['A total of %s objects that could not be freed:' % (len(gc.garbage),)]
        for i in gc.garbage:
            unclaimed.append('%s: %s (%s)' % (type(i), str(i), repr(i)))
        self.log('\n'.join(unclaimed))
        self.log("Done.")
Example No. 41
def test_threads(comm):
    # This test shall not core dump
    # raise many errors here and there on many threads

    from threading import Thread, Event
    import gc
    fname = tempfile.mkdtemp()
    x = BigFile(fname, create=True)

    b = x.create("Threading", Nfile=1, dtype='i8', size=128)

    old = gc.get_threshold()

    gc.set_threshold(1, 1, 1)
    E = Event()
    def func(i, b):
        E.wait()
        x['.'].attrs['v3'] = [1, 2, 3]
        err = 0
        for j in range(100 * i):
            try:
                with pytest.raises(BigFileError):
                    b.attrs['v 3'] = ['a', 'bb', 'ccc']

                b.write(0, numpy.ones(128))
            except BigBlockClosedError:
                err = err + 1

        b.close()

        x['Threading'].attrs['v3'] = [1, 2, 3]

    t = []
    for i in range(4):
        t.append(Thread(target = func, args=(i, b)))

    for i in t: i.start()

    E.set()

    for i in t: i.join()

    gc.set_threshold(*old)
    shutil.rmtree(fname)
Example No. 42
    def test_del_newclass(self):
        if test_support.due_to_ironpython_incompatibility(
                "http://tkbgitvstfat01:8080/WorkItemTracking/WorkItem.aspx?artifactMoniker=314470"
        ):
            return
        # __del__ methods can trigger collection, make this happen
        thresholds = gc.get_threshold()
        gc.enable()
        gc.set_threshold(1)

        class A(object):
            def __del__(self):
                dir(self)

        a = A()
        del a

        gc.disable()
        gc.set_threshold(*thresholds)
Example No. 43
    def test_issue95818(self):
        #See GH-95818 for details
        import gc
        self.addCleanup(gc.set_threshold, *gc.get_threshold())

        gc.set_threshold(1,1,1)
        class GCHello:
            def __del__(self):
                print("Destroyed from gc")

        def gen():
            yield

        fd = open(__file__)
        l = [fd, GCHello()]
        l.append(l)
        del fd
        del l
        gen()
Example No. 44
 def test_thresholds(self):
     self.assertTrue(REGISTRY.get_sample_value(
         'python_gc_threshold', labels={'generation': '0'}) is not None)
     self.assertTrue(REGISTRY.get_sample_value(
         'python_gc_threshold', labels={'generation': '1'}) is not None)
     self.assertTrue(REGISTRY.get_sample_value(
         'python_gc_threshold', labels={'generation': '2'}) is not None)
     original_thresholds = gc.get_threshold()
     try:
         gc.disable()
         gc.set_threshold(42, 43, 44)
         self.assertEqual(42, REGISTRY.get_sample_value(
             'python_gc_threshold', labels={'generation': '0'}))
         self.assertEqual(43, REGISTRY.get_sample_value(
             'python_gc_threshold', labels={'generation': '1'}))
         self.assertEqual(44, REGISTRY.get_sample_value(
             'python_gc_threshold', labels={'generation': '2'}))
     finally:
         gc.set_threshold(*original_thresholds)
         gc.enable()
Example No. 45
    def check_gc_during_creation(self, makeref):
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()

        class A:
            pass

        def callback(*args):
            pass

        referenced = A()
        a = A()
        a.a = a
        a.wr = makeref(referenced)
        try:
            a = A()
            weakref.ref(referenced, callback)
        finally:
            gc.set_threshold(*thresholds)
Example No. 46
 def test_len_race(self):
     self.addCleanup(gc.set_threshold, *gc.get_threshold())
     for th in range(1, 100):
         N = 20
         gc.collect(0)
         gc.set_threshold(th, th, th)
         items = [RefCycle() for i in range(N)]
         s = WeakSet(items)
         del items
         it = iter(s)
         try:
             next(it)
         except StopIteration:
             pass
         n1 = len(s)
         del it
         n2 = len(s)
         self.assertGreaterEqual(n1, 0)
         self.assertLessEqual(n1, N)
         self.assertGreaterEqual(n2, 0)
         self.assertLessEqual(n2, n1)
Example No. 47
 def check_len_race(self, dict_type, cons):
     self.addCleanup(gc.set_threshold, *gc.get_threshold())
     for th in range(1, 100):
         N = 20
         gc.collect(0)
         gc.set_threshold(th, th, th)
         items = [RefCycle() for i in range(N)]
         dct = dict_type(cons(o) for o in items)
         del items
         it = dct.items()
         try:
             next(it)
         except StopIteration:
             pass
         n1 = len(dct)
         del it
         n2 = len(dct)
         self.assertGreaterEqual(n1, 0)
         self.assertLessEqual(n1, N)
         self.assertGreaterEqual(n2, 0)
         self.assertLessEqual(n2, n1)
Example No. 48
    def f(*args, **kwargs):
        now = reactor.seconds()
        num_pending = 0

        # _newTimedCalls is one long list of *all* pending calls. Below loop
        # is based off of impl of reactor.runUntilCurrent
        for delayed_call in reactor._newTimedCalls:
            if delayed_call.time > now:
                break

            if delayed_call.delayed_time > 0:
                continue

            num_pending += 1

        num_pending += len(reactor.threadCallQueue)

        start = time.time() * 1000
        ret = func(*args, **kwargs)
        end = time.time() * 1000
        tick_time.inc_by(end - start)
        pending_calls_metric.inc_by(num_pending)

        # Check if we need to do a manual GC (since it's been disabled), and do
        # one if necessary.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        for i in (2, 1, 0):
            if threshold[i] < counts[i]:
                logger.info("Collecting gc %d", i)

                start = time.time() * 1000
                unreachable = gc.collect(i)
                end = time.time() * 1000

                gc_time.inc_by(end - start, i)
                gc_unreachable.inc_by(unreachable, i)

        return ret
Example No. 49
 def check_len_race(self, dict_type, cons):
     # Extended sanity checks for len() in the face of cyclic collection
     self.addCleanup(gc.set_threshold, *gc.get_threshold())
     for th in range(1, 100):
         N = 20
         gc.collect(0)
         gc.set_threshold(th, th, th)
         items = [RefCycle() for i in range(N)]
         dct = dict_type(cons(o) for o in items)
         del items
         # All items will be collected at next garbage collection pass
         it = dct.items()
         try:
             next(it)
         except StopIteration:
             pass
         n1 = len(dct)
         del it
         n2 = len(dct)
         self.assertGreaterEqual(n1, 0)
         self.assertLessEqual(n1, N)
         self.assertGreaterEqual(n2, 0)
         self.assertLessEqual(n2, n1)
Example No. 50
 def test_len_race(self):
     # Extended sanity checks for len() in the face of cyclic collection
     self.addCleanup(gc.set_threshold, *gc.get_threshold())
     for th in range(1, 100):
         N = 20
         gc.collect(0)
         gc.set_threshold(th, th, th)
         items = [RefCycle() for i in range(N)]
         s = WeakSet(items)
         del items
         # All items will be collected at next garbage collection pass
         it = iter(s)
         try:
             next(it)
         except StopIteration:
             pass
         n1 = len(s)
         del it
         n2 = len(s)
         self.assertGreaterEqual(n1, 0)
         self.assertLessEqual(n1, N)
         self.assertGreaterEqual(n2, 0)
         self.assertLessEqual(n2, n1)
Example No. 51
 def collect_process_metrics(self):
     """
     To collect all the process metrics.
     """
     rm = resource.getrusage(resource.RUSAGE_SELF)
     resource_metrics = {}
     for res_name in RESOURCE_STRUCT_RUSAGE:
         resource_metrics[res_name] = getattr(rm, res_name)
     for res_name in RESOURCE_STRUCT_RUSAGE[2:6]:
         resource_metrics[res_name] = resource_metrics[res_name] * 1024
     resource_metrics = [
         cur_val - pre_val for cur_val, pre_val in zip(
             [resource_metrics[key]
              for key in resource_metrics], self.previous_resource_metrics)
     ]
     self._collect_counter_from_list(self.resource_metric_names,
                                     resource_metrics)
     th_values = threading.enumerate()
     thread_metrics = [[t.daemon is True for t in th_values].count(True),
                       [t.daemon is False for t in th_values].count(True),
                       [
                           isinstance(t, threading._DummyThread)
                           for t in th_values
                       ].count(True)]
     self._collect_gauge_from_list(self.thread_metrics_names,
                                   thread_metrics)
     if gc.isenabled():
         collection = list(gc.get_count())
         threshold = list(gc.get_threshold())
         gc_metrics = collection + threshold
         gc_metrics = [
             cur_val - pre_val for cur_val, pre_val in zip(
                 gc_metrics, self.previous_gc_metrics)
         ]
         self._collect_counter_from_list(self.gc_metric_names, gc_metrics)
         self.previous_gc_metrics = gc_metrics
     self.previous_resource_metrics = resource_metrics
Example No. 52
    def check_gc_during_creation(self, makeref):
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass

        def callback(*args):
            pass

        referenced = A()

        a = A()
        a.a = a
        a.wr = makeref(referenced)

        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)

        finally:
            gc.set_threshold(*thresholds)
Example No. 53
def test_segfault_issue_52():
    threshold = None
    if hasattr(gc, 'get_threshold'):
        # PyPy is lacking these functions
        threshold = gc.get_threshold()
        gc.set_threshold(1, 1, 1)  # fail fast

    v = [pmap()]

    def step():
        depth = random.randint(1, 10)
        path = random.sample(range(100000), depth)
        v[0] = v[0].transform(path, "foo")

    for i in range(1000):  # usually crashes after 10-20 steps
        while True:
            try:
                step()
                break
            except AttributeError:  # evolver on string
                continue

    if threshold:
        gc.set_threshold(*threshold)
Example No. 54
def main(
    scheduler,
    host,
    worker_port,
    listen_address,
    contact_address,
    nanny_port,
    nthreads,
    nprocs,
    nanny,
    name,
    memory_limit,
    pid_file,
    reconnect,
    resources,
    dashboard,
    bokeh_port,
    local_directory,
    scheduler_file,
    interface,
    protocol,
    death_timeout,
    preload,
    preload_argv,
    dashboard_prefix,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port)
        dashboard_address = bokeh_port

    sec = Security(tls_ca_file=tls_ca_file,
                   tls_worker_cert=tls_cert,
                   tls_worker_key=tls_key)

    if nprocs > 1 and worker_port != 0:
        logger.error(
            "Failed to launch worker.  You cannot use the --port argument when nprocs > 1."
        )
        exit(1)

    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker.  You cannot use the --no-nanny argument when nprocs > 1."
        )
        exit(1)

    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given")
        exit(1)

    if nprocs > 1 and listen_address:
        logger.error("Failed to launch worker. "
                     "You cannot specify --listen-address when nprocs > 1.")
        exit(1)

    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        exit(1)

    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address,
                                                        strict=True)

        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        exit(1)

    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if not nthreads:
        nthreads = _ncores // nprocs

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    services = {}

    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    if nanny:
        kwargs = {"worker_port": worker_port, "listen_address": listen_address}
        t = Nanny
    else:
        kwargs = {}
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = Worker

    if (not scheduler and not scheduler_file
            and dask.config.get("scheduler-address", None) is None):
        raise ValueError("Need to provide scheduler address like\n"
                         "dask-worker SCHEDULER_ADDRESS:8786")

    if death_timeout is not None:
        death_timeout = parse_timedelta(death_timeout, "s")

    nannies = [
        t(scheduler,
          scheduler_file=scheduler_file,
          ncores=nthreads,
          services=services,
          loop=loop,
          resources=resources,
          memory_limit=memory_limit,
          reconnect=reconnect,
          local_dir=local_directory,
          death_timeout=death_timeout,
          preload=preload,
          preload_argv=preload_argv,
          security=sec,
          contact_address=contact_address,
          interface=interface,
          protocol=protocol,
          host=host,
          port=port,
          dashboard_address=dashboard_address if dashboard else None,
          service_kwargs={"bokhe": {
              "prefix": dashboard_prefix
          }},
          name=name if nprocs == 1 or not name else name + "-" + str(i),
          **kwargs) for i in range(nprocs)
    ]

    @gen.coroutine
    def close_all():
        # Unregister all workers from scheduler
        if nanny:
            yield [n.close(timeout=2) for n in nannies]

    def on_signal(signum):
        logger.info("Exiting on signal %d", signum)
        close_all()

    @gen.coroutine
    def run():
        yield nannies
        while all(n.status != "closed" for n in nannies):
            yield gen.sleep(0.2)

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except (KeyboardInterrupt, TimeoutError):
        pass
    finally:
        logger.info("End worker")
Example No. 55
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time :2020/12/24 10:24
# @Author :春衫
# @File :class_10day_gc.py

import gc

res = gc.get_threshold()
print(res)
# (700, 10, 10): when the first-generation list reaches 700 objects, it is collected once and the survivors move to the second-generation list
# after the first generation has been collected ten times, the second generation is collected, and so on
# there are at most three generations; long-lived survivors stay in the third-generation list
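Translated, the comments above describe CPython's generational scheme: new objects start in the youngest generation, a collection runs once its counter passes the first threshold (700 by default), survivors are promoted, and the older generations are collected only after enough collections of the younger ones. A small sketch that prints each generation's current counter next to the threshold it is compared against:

import gc

# Pair each generation's current allocation counter with the threshold
# that triggers its collection; CPython's defaults are (700, 10, 10).
for gen, (count, threshold) in enumerate(zip(gc.get_count(), gc.get_threshold())):
    print("generation %d: count=%d, threshold=%d" % (gen, count, threshold))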
Example No. 56
    def log(cls):
        print('Instance count:', cls.__personCount)


# p1=Person()
# print(p1)
# Person.log()
# p2=Person()
# Person.log()
# del p1
# Person.log()

p1 = Person()
# calling this function itself temporarily adds 1 to the reference count
print(sys.getrefcount(p1))
p2 = p1
print(sys.getrefcount(p2))
del p1
print(sys.getrefcount(p2))
del p2
# print(sys.getrefcount(p2))

import gc
# (700, 10, 10) means: garbage collection runs when the number of objects created minus the number destroyed exceeds 700
# and only after 10 generation-0 collections is a combined generation-1 and generation-0 collection run; after 10 generation-1 collections, a combined generation-2, generation-1 and generation-0 collection runs
print(gc.get_threshold())
gc.set_threshold(200, 5, 5)
print(gc.get_threshold())
# manually trigger garbage collection; you can pass the generation to collect (0, 1, or 2); with no argument a full collection is performed
gc.collect()
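The snippet above lowers the interpreter-wide thresholds and never restores them. Many of the test examples earlier on this page save and restore the original values instead; a minimal sketch of that pattern, reusing the (200, 5, 5) settings from the example:

import gc

saved = gc.get_threshold()
gc.set_threshold(200, 5, 5)   # temporary, more aggressive settings
try:
    pass  # ... run the code that should see the tighter collection schedule
finally:
    gc.set_threshold(*saved)  # restore the previous thresholds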
Example No. 57
    def run(self):
        while not self.terminated:
            try:
                delay, framenb, motion = self.q.popleft()
                fdata = None
                with self.lock:
                    fdata, nbytes = self.decoder.decode_frame(
                        self.frame2decode)

                image = self.image
                image.fill(220)

                if fdata:
                    (frame, w, h, ls) = fdata
                    if frame:
                        image = np.fromstring(frame, dtype=np.uint8)

                w = self.resx
                h = self.resy
                image = image.reshape((h, w, 3))
                re = motion[0]
                vv = motion[1] * 16
                mm = motion[2]

                x0 = re[0]
                y0 = re[1]
                w = re[2]
                h = re[3]
                xmin = mm[0] * 16
                ymin = mm[1] * 16
                xmax = mm[2] * 16
                ymax = mm[3] * 16
                x1 = x0 + w
                y1 = y0 + h
                x0 *= 16
                x1 *= 16
                y0 *= 16
                y1 *= 16

                # center line
                if self.ycross > 0:
                    cv2.line(image, (0, int(self.ycross)),
                             (self.resx, int(self.ycross)), (0, 0, 0), 1)

                if self.xcross > 0:
                    cv2.line(image, (int(self.xcross), 0),
                             (int(self.xcross), self.resy), (0, 0, 0), 1)

                if framenb < 0:
                    framenb = -framenb
                    #cv2.rectangle(image,(x0,y0),(x1,y1),(20,20,220),1)
                    cv2.rectangle(image, (xmin, ymin), (xmax, ymax),
                                  (20, 20, 220), 1)
                else:
                    cv2.rectangle(image, (xmin, ymin), (xmax, ymax),
                                  (200, 200, 200), 1)
                    cv2.rectangle(image, (x0, y0), (x1, y1), (20, 220, 20), 1)

                #txt = "%4.1fms" % (delay*1000.0)
                txt = "%4.1f" % (np.linalg.norm(motion[1]))
                cv2.putText(image, txt, (int(x0), int(y0)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (20, 220, 20), 1)
                xm = int((x1 + x0) / 2)
                ym = int((y1 + y0) / 2)
                xe = int(xm - vv[0])
                ye = int(ym - vv[1])
                cv2.arrowedLine(image, (xm, ym), (xe, ye), (20, 220, 20), 1)
                #image = cv2.resize(image,None,fx=0.5,fy=0.5,interpolation=cv2.INTER_LINEAR)
                image = np.rot90(image, self.k)
                imagepath = self.imgtemplate % self.nbimage
                if self.nbimage > self.nimages:
                    self.nbimage = 0
                else:
                    self.nbimage += 1
                cv2.imwrite(imagepath, image, [cv2.IMWRITE_JPEG_QUALITY, 90])
                try:
                    fs = open(self.imgctrl_file, "w")
                    fs.write(imagepath)
                    fs.close()
                except:
                    print("cannot write %s" % self.imgctrl_file)
                    pass

                #- do garbage collection here!
                c0, c1, c2 = gc.get_count()
                t0, t1, t2 = gc.get_threshold()
                if c0 > t0 or c1 > t1 or c2 > t2:
                    gc.collect()
                self.written = True

            except IndexError:
                self.event.clear()
                self.event.wait(1)
Example No. 58
def main(
    host,
    port,
    bokeh_port,
    show,
    dashboard,
    bokeh,
    dashboard_prefix,
    use_xheaders,
    pid_file,
    local_directory,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
    **kwargs
):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port
        )
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh

    if port is None and (not host or not re.search(r":\d", host)):
        port = 8786

    sec = Security(
        **{
            k: v
            for k, v in [
                ("tls_ca_file", tls_ca_file),
                ("tls_scheduler_cert", tls_cert),
                ("tls_scheduler_key", tls_key),
            ]
            if v is not None
        }
    )

    if not host and (tls_ca_file or tls_cert or tls_key):
        host = "tls://"

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    local_directory_created = False
    if local_directory:
        if not os.path.exists(local_directory):
            os.mkdir(local_directory)
            local_directory_created = True
    else:
        local_directory = tempfile.mkdtemp(prefix="scheduler-")
        local_directory_created = True
    if local_directory not in sys.path:
        sys.path.insert(0, local_directory)

    if sys.platform.startswith("linux"):
        import resource  # module fails importing on Windows

        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        limit = max(soft, hard // 2)
        resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))

    loop = IOLoop.current()
    logger.info("-" * 47)

    scheduler = Scheduler(
        loop=loop,
        security=sec,
        host=host,
        port=port,
        dashboard_address=dashboard_address if dashboard else None,
        service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
        **kwargs,
    )
    logger.info("Local Directory: %26s", local_directory)
    logger.info("-" * 47)

    install_signal_handlers(loop)

    async def run():
        await scheduler
        await scheduler.finished()

    try:
        loop.run_sync(run)
    finally:
        scheduler.stop()
        if local_directory_created:
            shutil.rmtree(local_directory)

        logger.info("End scheduler at %r", scheduler.address)
Example No. 59
def main(scheduler, host, worker_port, listen_address, contact_address,
         nanny_port, nthreads, nprocs, nanny, name, pid_file, resources,
         dashboard, bokeh, bokeh_port, scheduler_file, dashboard_prefix,
         tls_ca_file, tls_cert, tls_key, dashboard_address, worker_class,
         preload_nanny, **kwargs):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port)
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh

    sec = {
        k: v
        for k, v in [
            ("tls_ca_file", tls_ca_file),
            ("tls_worker_cert", tls_cert),
            ("tls_worker_key", tls_key),
        ] if v is not None
    }

    if nprocs < 0:
        nprocs = CPU_COUNT + 1 + nprocs

    if nprocs <= 0:
        logger.error(
            "Failed to launch worker. Must specify --nprocs so that there's at least one process."
        )
        sys.exit(1)

    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker.  You cannot use the --no-nanny argument when nprocs > 1."
        )
        sys.exit(1)

    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given")
        sys.exit(1)

    if nprocs > 1 and listen_address:
        logger.error("Failed to launch worker. "
                     "You cannot specify --listen-address when nprocs > 1.")
        sys.exit(1)

    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        sys.exit(1)

    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address,
                                                        strict=True)

        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        sys.exit(1)

    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if not nthreads:
        nthreads = CPU_COUNT // nprocs

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    worker_class = import_term(worker_class)
    if nanny:
        kwargs["worker_class"] = worker_class
        kwargs["preload_nanny"] = preload_nanny

    if nanny:
        kwargs.update({
            "worker_port": worker_port,
            "listen_address": listen_address
        })
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = worker_class

    if (not scheduler and not scheduler_file
            and dask.config.get("scheduler-address", None) is None):
        raise ValueError("Need to provide scheduler address like\n"
                         "dask-worker SCHEDULER_ADDRESS:8786")

    with suppress(TypeError, ValueError):
        name = int(name)

    if "DASK_INTERNAL_INHERIT_CONFIG" in os.environ:
        config = deserialize_for_cli(
            os.environ["DASK_INTERNAL_INHERIT_CONFIG"])
        # Update the global config given priority to the existing global config
        dask.config.update(dask.config.global_config, config, priority="old")

    nannies = [
        t(scheduler,
          scheduler_file=scheduler_file,
          nthreads=nthreads,
          loop=loop,
          resources=resources,
          security=sec,
          contact_address=contact_address,
          host=host,
          port=port,
          dashboard=dashboard,
          dashboard_address=dashboard_address,
          name=name if nprocs == 1 or name is None or name == "" else
          str(name) + "-" + str(i),
          **kwargs) for i in range(nprocs)
    ]

    async def close_all():
        # Unregister all workers from scheduler
        if nanny:
            await asyncio.gather(*[n.close(timeout=2) for n in nannies])

    signal_fired = False

    def on_signal(signum):
        nonlocal signal_fired
        signal_fired = True
        if signum != signal.SIGINT:
            logger.info("Exiting on signal %d", signum)
        return asyncio.ensure_future(close_all())

    async def run():
        await asyncio.gather(*nannies)
        await asyncio.gather(*[n.finished() for n in nannies])

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        if not signal_fired:
            logger.info("Timed out starting worker")
        sys.exit(1)
    except KeyboardInterrupt:
        pass
    finally:
        logger.info("End worker")
Example No. 60
        elif k == '-L':
            LOOP = True
        elif k == '-u':
            GUI = 1
        elif k == '-m':
            GUI = 'minimal'
        elif k == '-p':
            progress = True
        elif k == '-T':
            TRACE = True
        elif k == '-v':
            VERBOSE += 1

    if gcthresh is not None:
        gc.set_threshold(gcthresh)
        print 'gc threshold:', gc.get_threshold()

    if gcflags:
        import gc
        val = 0
        for flag in gcflags:
            v = getattr(gc, flag, None)
            if v is None:
                print "Unknown gc flag", repr(flag)
                print gc.set_debug.__doc__
                sys.exit(1)
            val |= v
        gc.set_debug(val)

    # Do the builds
    if build: