Example #1
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
    enabled = item.config.getoption('fault_handler')
    timeout = item.config.getoption('fault_handler_timeout')
    timeout_supported = timeout_support_available()
    if enabled and timeout > 0 and timeout_supported:
        import faulthandler
        stderr = item.config.fault_handler_stderr
        faulthandler.dump_traceback_later(timeout, file=stderr)
        try:
            yield
        finally:
            faulthandler.cancel_dump_traceback_later()
    else:
        yield
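
The wrapper above is the plain arm-then-cancel faulthandler pattern with pytest plumbing around it. A minimal standalone sketch of the same idea (the 2-second timeout and the sleep are illustrative stand-ins, not from the example):

import faulthandler
import sys
import time

# Arm the watchdog: if cancel_dump_traceback_later() is not reached
# within 2 seconds, every thread's traceback is dumped to the file.
faulthandler.dump_traceback_later(2, file=sys.stderr)
try:
    time.sleep(0.5)  # stand-in for the protected test run
finally:
    # Always disarm, otherwise the dump fires after the work is done.
    faulthandler.cancel_dump_traceback_later()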
Example #2
    def pytest_exception_interact(self) -> None:
        """Cancel any traceback dumping due to an interactive exception being
        raised."""
        import faulthandler

        faulthandler.cancel_dump_traceback_later()
Example #3
    def tearDown(self):
        self.stop_alarm()
        signal.signal(signal.SIGALRM, self.orig_handler)
        faulthandler.cancel_dump_traceback_later()
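
A tearDown like this implies a setUp that installed the handler and armed the watchdog. A hedged sketch of what that counterpart could look like (the handler, the 60-second value, and stop_alarm() are assumptions, not taken from the example):

    def setUp(self):
        # Remember the previous SIGALRM handler so tearDown can restore it
        # (signal.signal() returns the old handler; SIGALRM is POSIX-only).
        self.orig_handler = signal.signal(signal.SIGALRM,
                                          signal.default_int_handler)
        # Dump all thread tracebacks if the test runs longer than 60 seconds.
        faulthandler.dump_traceback_later(60)

    def stop_alarm(self):
        signal.alarm(0)  # cancel any pending SIGALRM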
Example #4
def runtest(ns, test):
    """Run a single test.

    test -- the name of the test
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    huntrleaks -- run multiple times to test for leaks; requires a debug
                  build; a triple corresponding to -R's three arguments
    output_on_failure -- if true, display test output on failure
    timeout -- dump the traceback and exit if a test takes more than
               timeout seconds
    failfast, match_tests -- See regrtest command-line flags for these.
    pgo -- if true, suppress any info irrelevant to generating a PGO build

    Returns the tuple (result, test_time), where result is one of the constants:
        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """

    verbose = ns.verbose
    quiet = ns.quiet
    huntrleaks = ns.huntrleaks
    output_on_failure = ns.verbose3
    failfast = ns.failfast
    match_tests = ns.match_tests
    timeout = ns.timeout
    pgo = ns.pgo

    use_timeout = (timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(timeout, exit=True)
    try:
        support.match_tests = match_tests
        if failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            # Reuse the same instance for all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (e.g. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()

            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, verbose, quiet, huntrleaks,
                                       display_failure=False, pgo=pgo)
                if result[0] == FAILED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, verbose, quiet, huntrleaks,
                                   display_failure=not verbose, pgo=pgo)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, verbose)
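
The stream cache above is stored as a function attribute, so it has to be seeded once before the first call; CPython's regrtest does this with a module-level assignment right after the definition:

runtest.stringio = None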
Example #5
def run_tests_multiprocess(regrtest):
    output = queue.Queue()
    pending = MultiprocessIterator(regrtest.tests)
    test_timeout = regrtest.ns.timeout
    use_timeout = (test_timeout is not None)

    workers = [
        MultiprocessThread(pending, output, regrtest.ns)
        for i in range(regrtest.ns.use_mp)
    ]
    print("Run tests in parallel using %s child processes" % len(workers))
    for worker in workers:
        worker.start()

    def get_running(workers):
        running = []
        for worker in workers:
            current_test = worker.current_test
            if not current_test:
                continue
            dt = time.monotonic() - worker.start_time
            if dt >= PROGRESS_MIN_TIME:
                text = '%s (%s)' % (current_test, format_duration(dt))
                running.append(text)
        return running

    finished = 0
    test_index = 1
    get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
    try:
        while finished < regrtest.ns.use_mp:
            if use_timeout:
                faulthandler.dump_traceback_later(test_timeout, exit=True)

            try:
                item = output.get(timeout=get_timeout)
            except queue.Empty:
                running = get_running(workers)
                if running and not regrtest.ns.pgo:
                    print('running: %s' % ', '.join(running), flush=True)
                continue

            test, stdout, stderr, result = item
            if test is None:
                finished += 1
                continue
            regrtest.accumulate_result(test, result)

            # Display progress
            ok, test_time, xml_data = result
            text = format_test_result(test, ok)
            if (ok not in (CHILD_ERROR, INTERRUPTED)
                    and test_time >= PROGRESS_MIN_TIME
                    and not regrtest.ns.pgo):
                text += ' (%s)' % format_duration(test_time)
            elif ok == CHILD_ERROR:
                text = '%s (%s)' % (text, test_time)
            running = get_running(workers)
            if running and not regrtest.ns.pgo:
                text += ' -- running: %s' % ', '.join(running)
            regrtest.display_progress(test_index, text)

            # Copy stdout and stderr from the child process
            if stdout:
                print(stdout, flush=True)
            if stderr and not regrtest.ns.pgo:
                print(stderr, file=sys.stderr, flush=True)

            if result[0] == INTERRUPTED:
                raise KeyboardInterrupt
            test_index += 1
    except KeyboardInterrupt:
        regrtest.interrupted = True
        pending.interrupted = True
        print()
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()

    # If tests are interrupted, wait until tests complete
    wait_start = time.monotonic()
    while True:
        running = [worker.current_test for worker in workers]
        running = list(filter(bool, running))
        if not running:
            break

        dt = time.monotonic() - wait_start
        line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
        if dt >= WAIT_PROGRESS:
            line = "%s since %.0f sec" % (line, dt)
        print(line, flush=True)
        for worker in workers:
            worker.join(WAIT_PROGRESS)
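
Re-arming inside the loop works because a new call to dump_traceback_later() replaces the pending one and restarts its timer, so each received result pushes the deadline back by test_timeout seconds. A tiny illustration:

import faulthandler

faulthandler.dump_traceback_later(30, exit=True)  # deadline: now + 30s
# ... a result arrives, loop re-arms ...
faulthandler.dump_traceback_later(30, exit=True)  # deadline reset: now + 30s
faulthandler.cancel_dump_traceback_later()        # disarm completely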
Example #6
def _cancel_traceback_dump():
    """Cancel traceback dumping if timeout support is available.
    """
    if timeout_support_available():
        import faulthandler
        faulthandler.cancel_dump_traceback_later()
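
timeout_support_available() is not shown on this page; a plausible sketch, assuming it only needs to probe whether the build exposes the watchdog API:

def timeout_support_available():
    # dump_traceback_later() is backed by a watchdog thread; builds
    # without thread support do not provide it, so probing the
    # attribute is enough.
    import faulthandler
    return hasattr(faulthandler, 'dump_traceback_later')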
Example #7
    @classmethod
    def tearDownClass(cls):
        cls.stop_alarm()
        signal.signal(signal.SIGALRM, cls.orig_handler)
        if hasattr(faulthandler, 'cancel_dump_traceback_later'):
            faulthandler.cancel_dump_traceback_later()
Example #8
def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time), where result is one of the
    constants:

        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.match_tests = ns.match_tests
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            # Reuse the same instance for all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (e.g. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()

            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
Example #9
def main(args):
    # Set up faulthandler
    faulthandler.enable()
    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info(f'Using random seed {args.seed}...')
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)
    char_vectors = util.torch_from_json(args.char_emb_file)

    # Get model
    log.info('Building model...')
    model_params = {
        'word_vectors': word_vectors,
        'char_vectors': char_vectors,
        'args': args
    }
    model = get_model(args.model, model_params)
    print('Model size: {:f} MB'.format(
        sum(p.nelement() * p.element_size()
            for p in model.parameters()) / (1024 * 1024)))
    # model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info(f'Loading checkpoint from {args.load_path}...')
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    optimizer = optim.Adadelta(model.parameters(),
                               args.lr,
                               weight_decay=args.l2_wd)
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info(f'Starting epoch {epoch}...')
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                progress_bar.set_description(
                    'Batch data_loading finished'.ljust(30))
                progress_bar.refresh()

                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)
                cc_idxs = cc_idxs.to(device)
                qc_idxs = qc_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()
                progress_bar.set_description(
                    'Batch initialization finished'.ljust(30))
                progress_bar.refresh()

                # Forward
                faulthandler.dump_traceback_later(timeout=3)
                log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs)
                faulthandler.cancel_dump_traceback_later()
                progress_bar.set_description(
                    'Batch forward finished'.ljust(30))
                progress_bar.refresh()
                y1, y2 = y1.to(device), y2.to(device)
                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                loss_val = loss.item()

                # Backward
                faulthandler.dump_traceback_later(timeout=3)
                loss.backward()
                faulthandler.cancel_dump_traceback_later()
                progress_bar.set_description(
                    'Batch backward finished'.ljust(30))
                progress_bar.refresh()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
                progress_bar.set_description('Optimization finished'.ljust(30))
                progress_bar.refresh()
                scheduler.step()
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info(f'Evaluating at step {step}...')
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    progress_bar.set_description(
                        'Evaluation finished'.ljust(30))
                    progress_bar.refresh()
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join(f'{k}: {v:05.2f}'
                                            for k, v in results.items())
                    log.info(f'Dev {results_str}')

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar(f'dev/{k}', v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
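
The repeated arm/cancel brackets around the forward and backward passes could be factored into a small helper; a sketch under our own naming (hang_watch is not from the example):

import contextlib
import faulthandler

@contextlib.contextmanager
def hang_watch(timeout=3):
    # Dump every thread's traceback if the wrapped block stalls for
    # more than `timeout` seconds; disarm as soon as it finishes.
    faulthandler.dump_traceback_later(timeout)
    try:
        yield
    finally:
        faulthandler.cancel_dump_traceback_later()

# Usage, replacing the explicit pairs above:
# with hang_watch(3):
#     log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs)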
Example #11
def run_tests_multiprocess(regrtest):
    output = queue.Queue()
    pending = MultiprocessIterator(regrtest.tests)
    test_timeout = regrtest.ns.timeout
    use_timeout = (test_timeout is not None)

    workers = [MultiprocessThread(pending, output, regrtest.ns)
               for i in range(regrtest.ns.use_mp)]
    print("Run tests in parallel using %s child processes"
          % len(workers))
    for worker in workers:
        worker.start()

    def get_running(workers):
        running = []
        for worker in workers:
            current_test = worker.current_test
            if not current_test:
                continue
            dt = time.monotonic() - worker.start_time
            if dt >= PROGRESS_MIN_TIME:
                text = '%s (%s)' % (current_test, format_duration(dt))
                running.append(text)
        return running

    finished = 0
    test_index = 1
    get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
    try:
        while finished < regrtest.ns.use_mp:
            if use_timeout:
                faulthandler.dump_traceback_later(test_timeout, exit=True)

            try:
                item = output.get(timeout=get_timeout)
            except queue.Empty:
                running = get_running(workers)
                if running and not regrtest.ns.pgo:
                    print('running: %s' % ', '.join(running), flush=True)
                continue

            test, stdout, stderr, result = item
            if test is None:
                finished += 1
                continue
            regrtest.accumulate_result(test, result)

            # Display progress
            ok, test_time = result
            text = format_test_result(test, ok)
            if (ok not in (CHILD_ERROR, INTERRUPTED)
                and test_time >= PROGRESS_MIN_TIME
                and not regrtest.ns.pgo):
                text += ' (%s)' % format_duration(test_time)
            elif ok == CHILD_ERROR:
                text = '%s (%s)' % (text, test_time)
            running = get_running(workers)
            if running and not regrtest.ns.pgo:
                text += ' -- running: %s' % ', '.join(running)
            regrtest.display_progress(test_index, text)

            # Copy stdout and stderr from the child process
            if stdout:
                print(stdout, flush=True)
            if stderr and not regrtest.ns.pgo:
                print(stderr, file=sys.stderr, flush=True)

            if result[0] == INTERRUPTED:
                raise KeyboardInterrupt
            test_index += 1
    except KeyboardInterrupt:
        regrtest.interrupted = True
        pending.interrupted = True
        print()
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()

    # If tests are interrupted, wait until tests complete
    wait_start = time.monotonic()
    while True:
        running = [worker.current_test for worker in workers]
        running = list(filter(bool, running))
        if not running:
            break

        dt = time.monotonic() - wait_start
        line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
        if dt >= WAIT_PROGRESS:
            line = "%s since %.0f sec" % (line, dt)
        print(line, flush=True)
        for worker in workers:
            worker.join(WAIT_PROGRESS)
Example #12
from contextlib import contextmanager
from faulthandler import dump_traceback_later, cancel_dump_traceback_later

@contextmanager
def exit_on_deadlock():
    dump_traceback_later(timeout=30, exit=True)
    yield
    cancel_dump_traceback_later()
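
Hedged usage sketch for the context manager above; worker stands in for whatever might deadlock:

# If join() blocks for more than 30 seconds, faulthandler dumps all
# thread tracebacks and terminates the process (exit=True).
with exit_on_deadlock():
    worker.join()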
Example #13
def _runtest(ns: Namespace, test_name: str) -> TestResult:
    # Handle faulthandler timeout, capture stdout+stderr, XML serialization
    # and measure time.

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None
                   and threading_helper.can_start_thread)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)

    start_time = time.perf_counter()
    try:
        support.set_match_tests(ns.match_tests, ns.ignore_tests)
        support.junit_xml_list = xml_list = [] if ns.xmlpath else None
        if ns.failfast:
            support.failfast = True

        if output_on_failure:
            support.verbose = True

            stream = io.StringIO()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            print_warning = support.print_warning
            orig_print_warnings_stderr = print_warning.orig_stderr

            output = None
            try:
                sys.stdout = stream
                sys.stderr = stream
                # print_warning() writes into the temporary stream to preserve
                # messages order. If support.environment_altered becomes true,
                # warnings will be written to sys.stderr below.
                print_warning.orig_stderr = stream

                result = _runtest_inner(ns, test_name, display_failure=False)
                if not isinstance(result, Passed):
                    output = stream.getvalue()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
                print_warning.orig_stderr = orig_print_warnings_stderr

            if output is not None:
                sys.stderr.write(output)
                sys.stderr.flush()
        else:
            # Tell tests to be moderately quiet
            support.verbose = ns.verbose

            result = _runtest_inner(ns,
                                    test_name,
                                    display_failure=not ns.verbose)

        if xml_list:
            import xml.etree.ElementTree as ET
            result.xml_data = [
                ET.tostring(x).decode('us-ascii') for x in xml_list
            ]

        result.duration_sec = time.perf_counter() - start_time
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        support.junit_xml_list = None
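
The extra can_start_thread guard matters because dump_traceback_later() is implemented with a watchdog thread. Outside CPython's test suite, a rough equivalent could probe for that capability directly; a sketch (the helper name is ours):

import threading

def _can_start_thread():
    # dump_traceback_later() needs a watchdog thread; on platforms or
    # builds where threads cannot start, skip the timeout entirely.
    try:
        t = threading.Thread(target=lambda: None)
        t.start()
        t.join()
        return True
    except RuntimeError:
        return False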
Example #14
    pythonDict["Variables"], pythonDict["Expression"] = sympy.cse(code)
    for i, expr in enumerate(pythonDict["Variables"]):
        pythonDict["Variables"][i] = {
            "name": str(expr[0]),
            "expr": str(expressionToCode(expr[1], language))
        }
    pythonDict["Expression"] = expressionToCode(pythonDict["Expression"][0],
                                                language)
    return pythonDict


# Begin Parsing
faulthandler.enable()

try:
    # If parsing doesn't return within 10 seconds, dump a traceback
    # (without exit=True the process keeps running after the dump)
    faulthandler.dump_traceback_later(10)

    command = sys.argv[1]
    language = sys.argv[2]
    expression = sympy.sympify(sys.argv[3])
    assert expression is not None, "SymPy Error: Cannot evaluate expression!"

    result = None
    if command == 'eval':
        print(json.dumps(convertSymPyToDict(expression, language), indent=4))
    sys.stdout.flush()

finally:
    faulthandler.cancel_dump_traceback_later()
Example #16
def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time, xml_data), where result is one
    of the constants:

        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
        EMPTY_TEST_SUITE test ran no subtests.

    If ns.xmlpath is not None, xml_data is a list containing each
    generated testsuite element.
    """

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.set_match_tests(ns.match_tests)
        # reset the environment_altered flag to detect if a test altered
        # the environment
        support.environment_altered = False
        support.junit_xml_list = xml_list = [] if ns.xmlpath else None
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            stream = io.StringIO()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)

        if xml_list:
            import xml.etree.ElementTree as ET
            xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
        else:
            xml_data = None
        return result + (xml_data, )
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
        support.junit_xml_list = None
Example #17
    def tearDown(self):
        self.stop_alarm()
        signal.signal(signal.SIGALRM, self.orig_handler)
        if hasattr(faulthandler, 'cancel_dump_traceback_later'):
            faulthandler.cancel_dump_traceback_later()
Example #18
    def pytest_enter_pdb(self) -> None:
        """Cancel any traceback dumping due to timeout before entering pdb.
        """
        import faulthandler

        faulthandler.cancel_dump_traceback_later()
Example #19
def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time), where result is one of the
    constants:

        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.match_tests = ns.match_tests
        # reset the environment_altered flag to detect if a test altered
        # the environment
        support.environment_altered = False
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            # Reuse the same instance for all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (e.g. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()

            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
Example #20
def runtest(test, verbose, quiet,
            huntrleaks=False, use_resources=None,
            output_on_failure=False, failfast=False, match_tests=None,
            timeout=None):
    """Run a single test.

    test -- the name of the test
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    huntrleaks -- run multiple times to test for leaks; requires a debug
                  build; a triple corresponding to -R's three arguments
    use_resources -- list of extra resources to use
    output_on_failure -- if true, display test output on failure
    timeout -- dump the traceback and exit if a test takes more than
               timeout seconds
    failfast, match_tests -- See regrtest command-line flags for these.

    Returns the tuple (result, test_time), where result is one of the constants:
        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """

    if use_resources is not None:
        support.use_resources = use_resources
    use_timeout = (timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(timeout, exit=True)
    try:
        support.match_tests = match_tests
        if failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            # Reuse the same instance for all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (e.g. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()

            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(test, verbose, quiet, huntrleaks,
                                       display_failure=False)
                if result[0] == FAILED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = verbose  # Tell tests to be moderately quiet
            result = runtest_inner(test, verbose, quiet, huntrleaks,
                                   display_failure=not verbose)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, verbose)
Example #21
def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time, xml_data), where result is one
    of the constants:

        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
        EMPTY_TEST_SUITE test ran no subtests.

    If ns.xmlpath is not None, xml_data is a list containing each
    generated testsuite element.
    """

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.set_match_tests(ns.match_tests)
        # reset the environment_altered flag to detect if a test altered
        # the environment
        support.environment_altered = False
        support.junit_xml_list = xml_list = [] if ns.xmlpath else None
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            stream = io.StringIO()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)

        if xml_list:
            import xml.etree.ElementTree as ET
            xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
        else:
            xml_data = None
        return result + (xml_data,)
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
        support.junit_xml_list = None