def collect(self):
    # In case of non-monotonicity in the clock, assume that any Python
    # operation lasts at least 1e-6 second.
    MIN_RUNTIME = 1e-6

    collect_start = thread_time()
    elapsed = max(collect_start - self.last_collect, MIN_RUNTIME)
    if self.last_gc_duration / elapsed < self.max_in_gc_frac:
        self.logger.debug(
            "Calling gc.collect(). %0.3fs elapsed since previous call.",
            elapsed)
        gc.collect()
        self.last_collect = collect_start
        self.last_gc_duration = max(thread_time() - collect_start,
                                    MIN_RUNTIME)
        if self.last_gc_duration > self.warn_if_longer:
            self.logger.warning(
                "gc.collect() took %0.3fs. This is usually"
                " a sign that some tasks handle too"
                " many Python objects at the same time."
                " Rechunking the work into smaller tasks"
                " might help.",
                self.last_gc_duration,
            )
        else:
            self.logger.debug("gc.collect() took %0.3fs",
                              self.last_gc_duration)
    else:
        self.logger.debug(
            "gc.collect() lasts %0.3fs but only %0.3fs "
            "elapsed since last call: throttling.",
            self.last_gc_duration,
            elapsed,
        )
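The throttling rule above can be read as: only run gc.collect() if the previous collection's duration, divided by the time elapsed since it started, stays below max_in_gc_frac. A minimal standalone sketch of that decision (the should_collect name and the numbers are illustrative, not part of the original code; the 0.05 default comes from the __init__ shown further down):

def should_collect(last_gc_duration, elapsed, max_in_gc_frac):
    # Collect only if the previous GC pause is a small enough
    # fraction of the time elapsed since the previous call.
    return last_gc_duration / elapsed < max_in_gc_frac

# With max_in_gc_frac = 0.05, a 0.1 s collection is repeated only
# once at least 2 s have passed since the previous call.
assert should_collect(0.1, 2.5, 0.05)      # 0.1 / 2.5 = 0.04 < 0.05
assert not should_collect(0.1, 1.0, 0.05)  # 0.1 / 1.0 = 0.10 >= 0.05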
def test_gc_diagnosis_cpu_time():
    diag = GCDiagnosis(warn_over_frac=0.75)
    diag.N_SAMPLES = 3  # shorten tests

    with enable_gc_diagnosis_and_log(diag, level="WARN") as sio:
        # Spend some CPU time doing only full GCs
        for i in range(diag.N_SAMPLES):
            gc.collect()
        assert not sio.getvalue()
        gc.collect()
        lines = sio.getvalue().splitlines()
        assert len(lines) == 1
        # Between 80% and 100%
        assert re.match(
            r"full garbage collections took (100|[89][0-9])% "
            r"CPU time recently",
            lines[0],
        )

    with enable_gc_diagnosis_and_log(diag, level="WARN") as sio:
        # Spend half the CPU time doing full GCs
        for i in range(diag.N_SAMPLES + 1):
            t1 = thread_time()
            gc.collect()
            dt = thread_time() - t1
            run_for(dt, timer=thread_time)
        # Less than 75% so nothing printed
        assert not sio.getvalue()
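The test above relies on a run_for helper from the surrounding test module. The sketch below is an assumption about its behaviour, not the library's definition: a busy loop that burns CPU (rather than sleeping) until the given timer says the requested duration has passed, which is what lets the second block spend roughly half its CPU time outside of gc.collect().

from time import perf_counter

def run_for(duration, timer=perf_counter):
    # Busy-wait, burning CPU instead of sleeping, until `duration`
    # seconds have elapsed according to `timer`.
    deadline = timer() + duration
    while timer() < deadline:
        pass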
def test_thread_time():
    start = metrics.thread_time()
    run_for(0.05)
    dt = metrics.thread_time() - start
    assert 0.03 <= dt <= 0.2

    # Sleep time not counted
    start = metrics.thread_time()
    time.sleep(0.1)
    dt = metrics.thread_time() - start
    assert dt <= 0.05

    if sys.platform == "linux":
        # Always per-thread on Linux
        t = threading.Thread(target=run_for, args=(0.1, ))
        start = metrics.thread_time()
        t.start()
        t.join()
        dt = metrics.thread_time() - start
        assert dt <= 0.05
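metrics.thread_time is a per-thread CPU clock, which is why sleeping is not counted and why the last block is guarded to Linux, where a per-thread clock is always available. A rough sketch of how such a timer can be selected using only the standard library (an assumption for illustration; the real distributed.metrics module may differ):

import time

try:
    # Per-thread CPU time, available on Python 3.7+ when the platform
    # provides a per-thread clock (e.g. CLOCK_THREAD_CPUTIME_ID).
    thread_time = time.thread_time
except AttributeError:
    # Fall back to process-wide CPU time; work done in other threads
    # is then counted as well, so callers cannot rely on per-thread
    # isolation on such platforms.
    thread_time = time.process_time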
def __init__(self, max_in_gc_frac=0.05, warn_if_longer=1, logger=None):
    self.max_in_gc_frac = max_in_gc_frac
    self.warn_if_longer = warn_if_longer
    self.last_collect = thread_time()
    self.last_gc_duration = 0
    self.logger = logger if logger is not None else _logger
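This __init__ and the collect() method at the top of the page appear to belong to the same throttling wrapper around gc.collect(). A minimal usage sketch under that assumption; the ThrottledGC class name, the import path, and the stand-in workload are assumptions, not confirmed by the snippets above:

from distributed.utils import ThrottledGC   # assumed import path

throttled_gc = ThrottledGC(max_in_gc_frac=0.05, warn_if_longer=1)

for _ in range(1000):
    data = [object() for _ in range(10_000)]   # stand-in workload
    del data
    # Cheap to call on every iteration: gc.collect() only runs when
    # the previous collection cost less than max_in_gc_frac of the
    # time elapsed since it; otherwise the call just logs and returns.
    throttled_gc.collect()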