def outer_loop_iteration():
    """Run one pass of gsubtreed's inner loop and record per-path commit counts.

    Accumulates the processed-commit count for each subtree path into the
    module-level `summary` dict and reports it to `commits_counter`.

    Returns:
      bool: the success flag reported by gsubtreed.inner_loop.
    """
    success, paths_counts = gsubtreed.inner_loop(opts.repo, cref, opts.dry_run)
    # items() iterates identically on Python 2 and 3; the original
    # iteritems() is Python-2-only and breaks under Python 3.
    for path, count in paths_counts.items():
        summary[path] += count
        commits_counter.increment_by(count, fields={'path': path})
    return success
def run():
    """Run one filtered-logging gsubtreed inner loop (dry_run=False).

    Captures root-logger output (through LogFilterer/LogFormatter) into an
    in-memory buffer, silences stdio for the duration of the loop, and
    appends the captured log plus the (success, processed) result to `ret`.
    """
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    log_buffer = StringIO()
    root_logger = logging.getLogger()
    handler = logging.StreamHandler(log_buffer)
    handler.setFormatter(LogFormatter('%(levelname)s: %(message)s'))
    handler.addFilter(LogFilterer())
    handler.setLevel(logging.INFO)
    root_logger.addHandler(handler)
    # Run pusher threads sequentially and deterministically.
    gsubtreed.Pusher.FAKE_THREADING = True
    success, processed = False, {}
    try:
        with open(os.devnull, 'w') as devnull:
            # TODO(iannucci): Let expect_tests absorb stdio
            sys.stderr = sys.stdout = devnull
            local.reify()
            # dry_run is explicitly False for this variant.
            success, processed = gsubtreed.inner_loop(local, cref, False)
    except Exception:  # pragma: no cover
        ret.append(traceback.format_exc().splitlines())
    finally:
        # Restore global state regardless of how the loop exited.
        gsubtreed.Pusher.FAKE_THREADING = False
        sys.stdout, sys.stderr = saved_stdout, saved_stderr
        root_logger.removeHandler(handler)
    ret.append({'log output': log_buffer.getvalue().splitlines()})
    ret.append({
        'inner_loop success': success,
        'processed': processed,
    })
def run():
    """Run one gsubtreed inner loop, capturing (and de-flaking) log output.

    Root-logger output is captured into an in-memory buffer while stdio is
    redirected to /dev/null; the filtered log lines and the
    (success, processed) result are appended to `ret`.
    """
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    log_buffer = StringIO()
    root_logger = logging.getLogger()
    handler = logging.StreamHandler(log_buffer)
    handler.setFormatter(LogFormatter("%(levelname)s: %(message)s"))
    handler.setLevel(logging.INFO)
    root_logger.addHandler(handler)
    # Run pusher threads sequentially and deterministically.
    gsubtreed.Pusher.FAKE_THREADING = True
    success, processed = False, {}
    try:
        with open(os.devnull, "w") as devnull:
            # TODO(iannucci): Let expect_tests absorb stdio
            sys.stderr = sys.stdout = devnull
            local.reify()
            success, processed = gsubtreed.inner_loop(local, cref)
    except Exception:  # pragma: no cover
        ret.append(traceback.format_exc().splitlines())
    finally:
        # Restore global state regardless of how the loop exited.
        gsubtreed.Pusher.FAKE_THREADING = False
        sys.stdout, sys.stderr = saved_stdout, saved_stderr
        root_logger.removeHandler(handler)
    # infra.libs.git2.repo logs this message if the command took longer than
    # 1s to run. This causes test flakes occasionally.
    log_lines = []
    for line in log_buffer.getvalue().splitlines():
        if "Finished in " not in line:
            log_lines.append(line)
    ret.append({"log output": log_lines})
    ret.append({"inner_loop success": success, "processed": processed})
def run():
    """Run one gsubtreed inner loop, capturing its log output.

    Root-logger output is captured into an in-memory buffer while stdio is
    redirected to /dev/null; the captured log lines and the
    (success, processed) result are appended to `ret`.
    """
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    log_buffer = StringIO()
    root_logger = logging.getLogger()
    handler = logging.StreamHandler(log_buffer)
    handler.setFormatter(LogFormatter('%(levelname)s: %(message)s'))
    handler.setLevel(logging.INFO)
    root_logger.addHandler(handler)
    # Run pusher threads sequentially and deterministically.
    gsubtreed.Pusher.FAKE_THREADING = True
    success, processed = False, {}
    try:
        with open(os.devnull, 'w') as devnull:
            # TODO(iannucci): Let expect_tests absorb stdio
            sys.stderr = sys.stdout = devnull
            local.reify()
            success, processed = gsubtreed.inner_loop(local, cref)
    except Exception:  # pragma: no cover
        ret.append(traceback.format_exc().splitlines())
    finally:
        # Restore global state regardless of how the loop exited.
        gsubtreed.Pusher.FAKE_THREADING = False
        sys.stdout, sys.stderr = saved_stdout, saved_stderr
        root_logger.removeHandler(handler)
    ret.append({'log output': log_buffer.getvalue().splitlines()})
    ret.append({
        'inner_loop success': success,
        'processed': processed,
    })
def outer_loop_iteration():
    """Run one pass of gsubtreed's inner loop and record per-path commit counts.

    Accumulates the processed-commit count for each subtree path into the
    module-level `summary` dict and reports it to `commits_counter`.

    Returns:
      bool: the success flag reported by gsubtreed.inner_loop.
    """
    success, paths_counts = gsubtreed.inner_loop(opts.repo, cref)
    # items() iterates identically on Python 2 and 3; the original
    # iteritems() is Python-2-only and breaks under Python 3.
    for path, count in paths_counts.items():
        summary[path] += count
        commits_counter.increment_by(count, fields={'path': path})
    return success