Example #1
def run(argv=None, interactive=True):
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                                        'ObsPy tests.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode')
    parser.add_argument('--raise-all-warnings', action='store_true',
                        help='All warnings are raised as exceptions when this '
                             'flag is set. Only for debugging purposes.')

    # filter options
    filter = parser.add_argument_group('Module Filter',
                                       'Providing no modules will test all '
                                       'ObsPy modules which do not require an '
                                       'active network connection.')
    filter.add_argument('-a', '--all', action='store_true',
                        dest='test_all_modules',
                        help='test all modules (including network modules)')
    filter.add_argument('-x', '--exclude', action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*',
                        help='test modules to run')

    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t', '--timeit', action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s', '--slowest', default=0, type=int, dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p', '--profile', action='store_true',
                        help='uses cProfile, saves the results to file ' +
                             'obspy.pstats and prints some profiling numbers')

    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r', '--report', action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d', '--dontask', action='store_true',
                        help="don't explicitly ask for submitting a test "
                             "report")
    report.add_argument('-u', '--server', default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n', '--node', dest='hostname', default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l', '--log', default=None,
                        help='append log file to test report')
    report.add_argument('--ci-url', default=None, dest="ci_url",
                        help='URL to Continuous Integration job page.')
    report.add_argument('--pr-url', default=None,
                        dest="pr_url", help='Github (Pull Request) URL.')

    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial', action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8', action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images', action='store_true',
                        help='store images created during image comparison '
                             'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images', action='store_true',
                        help='when storing images created during testing, '
                             'only store failed images and the corresponding '
                             'diff images (but not images that passed the '
                             'corresponding test).')

    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # turn NumPy floating-point errors into warnings
        np.seterr(all='warn')
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # optionally turn every warning into an exception (debugging aid)
    if args.raise_all_warnings:
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user warnings as errors
        warnings.simplefilter("error", UserWarning)
    # check for send report option or environmental settings
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""

    # All arguments are used by the test runner and should not interfere
    # with any other module that might also parse them, e.g. flake8.
    sys.argv = sys.argv[:1]

    return run_tests(verbosity, args.tests, report, args.log, args.server,
                     args.test_all_modules, args.timeit, interactive, args.n,
                     exclude=args.exclude, tutorial=args.tutorial,
                     hostname=args.hostname, ci_url=args.ci_url,
                     pr_url=args.pr_url)
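
A minimal sketch of calling the entry point above programmatically rather than through the console script; the chosen flags and the 'core' module name are only examples, and the module-level names used inside run() (np, warnings, os, sys, run_tests, MatplotlibBackend, ...) are assumed to be importable as in the real module.

# hypothetical programmatic invocation; flags and module name are examples
# run only the 'core' tests verbosely and never prompt to send a report
run(argv=['-v', '-d', 'core'], interactive=False)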
Example #2
def run(argv=None, interactive=True):
    MatplotlibBackend.switch_backend("AGG", sloppy=False)
    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                            'ObsPy tests.')
    parser.add_argument('-V',
                        '--version',
                        action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='verbose mode')
    parser.add_argument('-q',
                        '--quiet',
                        action='store_true',
                        help='quiet mode')
    parser.add_argument('--raise-all-warnings',
                        action='store_true',
                        help='All warnings are raised as exceptions when this '
                        'flag is set. Only for debugging purposes.')

    # filter options
    filter = parser.add_argument_group(
        'Module Filter', 'Providing no modules will test all '
        'ObsPy modules which do not require an '
        'active network connection.')
    filter.add_argument('-a',
                        '--all',
                        action='store_true',
                        dest='test_all_modules',
                        help='test all modules (including network modules)')
    filter.add_argument('-x',
                        '--exclude',
                        action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*', help='test modules to run')

    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t',
                        '--timeit',
                        action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s',
                        '--slowest',
                        default=0,
                        type=int,
                        dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p',
                        '--profile',
                        action='store_true',
                        help='uses cProfile, saves the results to file ' +
                        'obspy.pstats and prints some profiling numbers')

    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r',
                        '--report',
                        action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d',
                        '--dontask',
                        action='store_true',
                        help="don't explicitly ask for submitting a test "
                        "report")
    report.add_argument('-u',
                        '--server',
                        default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n',
                        '--node',
                        dest='hostname',
                        default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l',
                        '--log',
                        default=None,
                        help='append log file to test report')
    report.add_argument('--ci-url',
                        default=None,
                        dest="ci_url",
                        help='URL to Continuous Integration job page.')
    report.add_argument('--pr-url',
                        default=None,
                        dest="pr_url",
                        help='Github (Pull Request) URL.')

    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial',
                        action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8',
                        action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images',
                        action='store_true',
                        help='store images created during image comparison '
                        'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images',
                        action='store_true',
                        help='when storing images created during testing, '
                        'only store failed images and the corresponding '
                        'diff images (but not images that passed the '
                        'corresponding test).')
    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # turn NumPy floating-point errors into warnings
        np.seterr(all='warn')
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # optionally turn every warning into an exception (debugging aid)
    if args.raise_all_warnings:
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user warnings as errors
        warnings.simplefilter("error", UserWarning)
    # ignore specific warnings
    msg = ('Matplotlib is currently using agg, which is a non-GUI backend, '
           'so cannot show the figure.')
    warnings.filterwarnings("ignore", message=msg)
    # check for send report option or environmental settings
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""

    # All arguments are used by the test runner and should not interfere
    # with any other module that might also parse them, e.g. flake8.
    sys.argv = sys.argv[:1]
    return run_tests(verbosity,
                     args.tests,
                     report,
                     args.log,
                     args.server,
                     args.test_all_modules,
                     args.timeit,
                     interactive,
                     args.n,
                     exclude=args.exclude,
                     tutorial=args.tutorial,
                     hostname=args.hostname,
                     ci_url=args.ci_url,
                     pr_url=args.pr_url)
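
Example #2 mainly differs from #1 by suppressing Matplotlib's "non-GUI backend" warning even when --raise-all-warnings escalates UserWarning to an error. The ordering works because warnings filters are consulted newest-first. The standalone sketch below (not part of the runner, with a shortened message) illustrates that behaviour.

# self-contained sketch: a later, more specific "ignore" filter wins over an
# earlier blanket "error" filter for UserWarning
import warnings

warnings.simplefilter("error", UserWarning)
warnings.filterwarnings("ignore",
                        message="Matplotlib is currently using agg")
# suppressed by the message filter added last (checked first)
warnings.warn("Matplotlib is currently using agg, so cannot show the figure.")
try:
    # any other UserWarning is still raised as an exception
    warnings.warn("some other user warning")
except UserWarning as exc:
    print("escalated:", exc)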
Example #3
def _create_report(ttrs, timetaken, log, server, hostname, sorted_tests,
                   ci_url=None, pr_url=None, import_failures=None):
    # import additional libraries here to speed up normal tests
    from future import standard_library
    with standard_library.hooks():
        import urllib.parse
        import http.client
    import codecs
    from xml.etree import ElementTree
    from xml.sax.saxutils import escape
    if import_failures is None:
        import_failures = {}
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['slowest_tests'] = [("%0.3fs" % dt, "%s" % desc)
                               for (desc, dt) in sorted_tests[:20]]
    result['timetaken'] = timetaken
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except Exception:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except Exception:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        result['obspy'][module]['installed'] = installed
        # add a failed-to-import test module to report with an error
        if module in import_failures:
            result['obspy'][module]['timetaken'] = 0
            result['obspy'][module]['tested'] = True
            result['obspy'][module]['tests'] = 1
            # can't say how many tests would have been in that suite so just
            # leave 0
            result['obspy'][module]['skipped'] = 0
            result['obspy'][module]['failures'] = {}
            result['obspy'][module]['errors'] = {
                'f%s' % (errors): import_failures[module]}
            tests += 1
            errors += 1
            continue
        if module not in ttrs:
            continue
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        skipped += len(ttr.skipped)
        result['obspy'][module]['skipped'] = len(ttr.skipped)
        tests += ttr.testsRun
        # depending on the module type, record problems as failures (network
        # modules) or as errors (all other modules)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        if module == "pep8-naming":
            module_ = "pep8ext_naming"
        else:
            module_ = module
        temp = module_.split('.')
        try:
            mod = __import__(module_, fromlist=temp[1:])
        except ImportError:
            version_ = '---'
        else:
            try:
                version_ = mod.__version__
            except AttributeError:
                version_ = '???'
        result['dependencies'][module] = version_
    # get system / environment settings
    result['platform'] = {}
    for func in ['system', 'release', 'version', 'machine',
                 'processor', 'python_version', 'python_implementation',
                 'python_compiler', 'architecture']:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except Exception:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except Exception:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped
    # try to append info on skipped tests:
    result['skipped_tests_details'] = []
    try:
        for module, testresult_ in ttrs.items():
            if testresult_.skipped:
                for skipped_test, skip_message in testresult_.skipped:
                    result['skipped_tests_details'].append(
                        (module, skipped_test.__module__,
                         skipped_test.__class__.__name__,
                         skipped_test._testMethodName, skip_message))
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        print("\n".join(traceback.format_exception(exc_type, exc_value,
                                                   exc_tb)))
        result['skipped_tests_details'] = []

    if ci_url is not None:
        result['ciurl'] = ci_url
    if pr_url is not None:
        result['prurl'] = pr_url

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.items():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = ElementTree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, (str, native_str)):
                    ElementTree.SubElement(doc, key).text = value
                elif isinstance(value, bytes):
                    ElementTree.SubElement(doc, key).text = str(value, 'utf-8')
                else:
                    ElementTree.SubElement(doc, key).text = str(value)
            else:
                ElementTree.SubElement(doc, key)
    root = ElementTree.Element("report")
    _dict2xml(root, result)
    xml_doc = ElementTree.tostring(root)
    print()
    # send result to report server
    params = urllib.parse.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs) + len(import_failures),
        'xml': xml_doc
    })
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = http.client.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect
    if response.status == 301:
        o = urllib.parse.urlparse(response.msg['location'])
        conn = http.client.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not send a test report to %s." % (server))
        print(response.reason)
    conn.close()
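
To make the shape of the submitted XML easier to picture, here is a deliberately simplified, standalone re-implementation of the nested _dict2xml helper applied to a toy report dict. It drops the string/bytes handling and key sanitising of the real helper and is for illustration only.

# simplified sketch of the dict-to-XML conversion used for the report payload
from xml.etree import ElementTree


def dict2xml_sketch(doc, result):
    for key, value in result.items():
        if isinstance(value, dict):
            dict2xml_sketch(ElementTree.SubElement(doc, key), value)
        else:
            ElementTree.SubElement(doc, key).text = str(value)


root = ElementTree.Element("report")
dict2xml_sketch(root, {'timestamp': 0, 'obspy': {'core': {'tests': 42}}})
# prints the whole report as one flat XML string
print(ElementTree.tostring(root).decode())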
Example #4
def _createReport(ttrs, timetaken, log, server, hostname):
    # import additional libraries here to speed up normal tests
    import httplib
    import urllib
    from urlparse import urlparse
    from xml.sax.saxutils import escape
    import codecs
    from xml.etree import ElementTree as etree
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['timetaken'] = timetaken
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        if module not in ttrs:
            continue
        result['obspy'][module]['installed'] = installed
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        # skipped is not supported for Python < 2.7
        try:
            skipped += len(ttr.skipped)
            result['obspy'][module]['skipped'] = len(ttr.skipped)
        except AttributeError:
            skipped = ''
            result['obspy'][module]['skipped'] = ''
        tests += ttr.testsRun
        # depending on the module type, record problems as failures (network
        # modules) or as errors (all other modules)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        temp = module.split('.')
        try:
            mod = __import__(module, fromlist=temp[1:])
            if module == '_omnipy':
                result['dependencies'][module] = mod.coreVersion()
            else:
                result['dependencies'][module] = mod.__version__
        except:
            result['dependencies'][module] = ''
    # get system / environment settings
    result['platform'] = {}
    for func in [
            'system', 'release', 'version', 'machine', 'processor',
            'python_version', 'python_implementation', 'python_compiler',
            'architecture'
    ]:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.iteritems():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = etree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, unicode):
                    etree.SubElement(doc, key).text = value
                elif isinstance(value, str):
                    etree.SubElement(doc, key).text = unicode(value, 'utf-8')
                else:
                    etree.SubElement(doc, key).text = str(value)
            else:
                etree.SubElement(doc, key)

    root = etree.Element("report")
    _dict2xml(root, result)
    xml_doc = etree.tostring(root)
    print
    # send result to report server
    params = urllib.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs),
        'xml': xml_doc
    })
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain"
    }
    conn = httplib.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect
    if response.status == 301:
        o = urlparse(response.msg['location'])
        conn = httplib.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not send a test report to %s." % (server))
        print(response.reason)
Example #5
def _create_report(ttrs,
                   timetaken,
                   log,
                   server,
                   hostname,
                   sorted_tests,
                   ci_url=None,
                   pr_url=None,
                   import_failures=None):
    """
    If `server` is specified without URL scheme, 'https://' will be used as a
    default.
    """
    # import additional libraries here to speed up normal tests
    import urllib.parse
    import requests
    import codecs
    from xml.etree import ElementTree
    from xml.sax.saxutils import escape
    if import_failures is None:
        import_failures = {}
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['slowest_tests'] = [("%0.3fs" % dt, "%s" % desc)
                               for (desc, dt) in sorted_tests[:20]]
    result['timetaken'] = timetaken
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except Exception:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except Exception:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        result['obspy'][module]['installed'] = installed
        # add a failed-to-import test module to report with an error
        if module in import_failures:
            result['obspy'][module]['timetaken'] = 0
            result['obspy'][module]['tested'] = True
            result['obspy'][module]['tests'] = 1
            # can't say how many tests would have been in that suite so just
            # leave 0
            result['obspy'][module]['skipped'] = 0
            result['obspy'][module]['failures'] = {}
            result['obspy'][module]['errors'] = {
                'f%s' % (errors): import_failures[module]
            }
            tests += 1
            errors += 1
            continue
        if module not in ttrs:
            continue
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        skipped += len(ttr.skipped)
        result['obspy'][module]['skipped'] = len(ttr.skipped)
        tests += ttr.testsRun
        # depending on the module type, record problems as failures (network
        # modules) or as errors (all other modules)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        if module == "pep8-naming":
            module_ = "pep8ext_naming"
        else:
            module_ = module
        temp = module_.split('.')
        try:
            mod = __import__(module_, fromlist=temp[1:])
        except ImportError:
            version_ = '---'
        else:
            try:
                version_ = mod.__version__
            except AttributeError:
                version_ = '???'
        result['dependencies'][module] = version_
    # get system / environment settings
    result['platform'] = {}
    for func in [
            'system', 'release', 'version', 'machine', 'processor',
            'python_version', 'python_implementation', 'python_compiler',
            'architecture'
    ]:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except Exception:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except Exception:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped
    # try to append info on skipped tests:
    result['skipped_tests_details'] = []
    try:
        for module, testresult_ in ttrs.items():
            if testresult_.skipped:
                for skipped_test, skip_message in testresult_.skipped:
                    result['skipped_tests_details'].append(
                        (module, skipped_test.__module__,
                         skipped_test.__class__.__name__,
                         skipped_test._testMethodName, skip_message))
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        print("\n".join(traceback.format_exception(exc_type, exc_value,
                                                   exc_tb)))
        result['skipped_tests_details'] = []

    if ci_url is not None:
        result['ciurl'] = ci_url
    if pr_url is not None:
        result['prurl'] = pr_url

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.items():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = ElementTree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, str):
                    ElementTree.SubElement(doc, key).text = value
                elif isinstance(value, bytes):
                    ElementTree.SubElement(doc, key).text = str(value, 'utf-8')
                else:
                    ElementTree.SubElement(doc, key).text = str(value)
            else:
                ElementTree.SubElement(doc, key)

    root = ElementTree.Element("report")
    _dict2xml(root, result)
    xml_doc = ElementTree.tostring(root)
    print()
    # send result to report server
    params = urllib.parse.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs) + len(import_failures),
        'xml': xml_doc
    })
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain"
    }
    url = server
    if not urllib.parse.urlparse(url).scheme:
        url = "https://" + url
    response = requests.post(url=url,
                             headers=headers,
                             data=params.encode('UTF-8'))
    # get the response
    if response.status_code == 200:
        report_url = response.json().get('url', server)
        print('Your test results have been reported and are available at: '
              '{}\nThank you!'.format(report_url))
    # handle errors
    else:
        print("Error: Could not send a test report to %s." % (server))
        print(response.reason)
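
The main functional change in this version is that the report is POSTed with requests and that a server given as a bare hostname gets an 'https://' scheme, as the docstring states. A tiny sketch of that defaulting logic, pulled out into a hypothetical helper:

# hypothetical helper mirroring the scheme defaulting above
import urllib.parse


def default_to_https(server):
    # prepend 'https://' when the server is given without a URL scheme
    if not urllib.parse.urlparse(server).scheme:
        return "https://" + server
    return server


print(default_to_https("tests.obspy.org"))        # https://tests.obspy.org
print(default_to_https("http://localhost:8080"))  # unchanged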
Example #6
def run(argv=None, interactive=True):
    try:
        import matplotlib
        matplotlib.use("AGG")
        if matplotlib.get_backend().upper() != "AGG":
            raise Exception()
    except:
        msg = "unable to change backend to 'AGG' (to avoid windows popping up)"
        warnings.warn(msg)

    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                            'ObsPy tests.')
    parser.add_argument('-V',
                        '--version',
                        action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='verbose mode')
    parser.add_argument('-q',
                        '--quiet',
                        action='store_true',
                        help='quiet mode')

    # filter options
    filter = parser.add_argument_group(
        'Module Filter', 'Providing no modules will test all '
        'ObsPy modules which do not require an '
        'active network connection.')
    filter.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='test all modules (including network modules)')
    filter.add_argument('-x',
                        '--exclude',
                        action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*', help='test modules to run')

    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t',
                        '--timeit',
                        action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s',
                        '--slowest',
                        default=0,
                        type=int,
                        dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p',
                        '--profile',
                        action='store_true',
                        help='uses cProfile, saves the results to file ' +
                        'obspy.pstats and prints some profiling numbers')

    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r',
                        '--report',
                        action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d',
                        '--dontask',
                        action='store_true',
                        help="don't explicitly ask for submitting a test "
                        "report")
    report.add_argument('-u',
                        '--server',
                        default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n',
                        '--node',
                        dest='hostname',
                        default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l',
                        '--log',
                        default=None,
                        help='append log file to test report')

    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial',
                        action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8',
                        action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images',
                        action='store_true',
                        help='store images created during image comparison '
                        'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images',
                        action='store_true',
                        help='when storing images created during testing, '
                        'only store failed images and the corresponding '
                        'diff images (but not images that passed the '
                        'corresponding test).')

    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user warnings as errors
        warnings.simplefilter("error", UserWarning)
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # check for send report option or environmental settings
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""
    return runTests(verbosity,
                    args.tests,
                    report,
                    args.log,
                    args.server,
                    args.all,
                    args.timeit,
                    interactive,
                    args.n,
                    exclude=args.exclude,
                    tutorial=args.tutorial,
                    hostname=args.hostname)
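
The image-related flags above only set environment variables to an empty string, which suggests that downstream code checks for the presence of the variable rather than its value. The sketch below shows how such a check presumably looks; the helper name is invented for illustration.

# hypothetical consumer of the OBSPY_KEEP_IMAGES flag set by the runner
import os


def keep_images():
    # presence of the variable is what matters, its value stays empty
    return 'OBSPY_KEEP_IMAGES' in os.environ


os.environ['OBSPY_KEEP_IMAGES'] = ""
print(keep_images())  # True, even though the value is an empty string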
Example #7
def _createReport(ttrs, timetaken, log, server, hostname):
    # import additional libraries here to speed up normal tests
    from obspy.core import compatibility
    from xml.sax.saxutils import escape
    import codecs
    from xml.etree import ElementTree as etree
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['timetaken'] = timetaken
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        if module not in ttrs:
            continue
        result['obspy'][module]['installed'] = installed
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        # skipped is not supported for Python < 2.7
        try:
            skipped += len(ttr.skipped)
            result['obspy'][module]['skipped'] = len(ttr.skipped)
        except AttributeError:
            skipped = ''
            result['obspy'][module]['skipped'] = ''
        tests += ttr.testsRun
        # depending on the module type, record problems as failures (network
        # modules) or as errors (all other modules)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        temp = module.split('.')
        try:
            mod = __import__(module, fromlist=temp[1:])
            if module == '_omnipy':
                result['dependencies'][module] = mod.coreVersion()
            else:
                result['dependencies'][module] = mod.__version__
        except ImportError:
            result['dependencies'][module] = ''
    # get system / environment settings
    result['platform'] = {}
    for func in ['system', 'release', 'version', 'machine',
                 'processor', 'python_version', 'python_implementation',
                 'python_compiler', 'architecture']:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.items():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = etree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, (str, native_str)):
                    etree.SubElement(doc, key).text = value
                elif isinstance(value, bytes):
                    etree.SubElement(doc, key).text = str(value, 'utf-8')
                else:
                    etree.SubElement(doc, key).text = str(value)
            else:
                etree.SubElement(doc, key)
    root = etree.Element("report")
    _dict2xml(root, result)
    xml_doc = etree.tostring(root)
    print()
    # send result to report server
    params = compatibility.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs),
        'xml': xml_doc
    })
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = compatibility.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect
    if response.status == 301:
        o = compatibility.urlparse(response.msg['location'])
        conn = compatibility.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not send a test report to %s." % (server))
        print(response.reason)
Example #8
def run(argv=None, interactive=True):
    try:
        import matplotlib
        matplotlib.use("AGG")
        if matplotlib.get_backend().upper() != "AGG":
            raise Exception()
    except:
        msg = "unable to change backend to 'AGG' (to avoid windows popping up)"
        warnings.warn(msg)

    parser = ArgumentParser(prog='obspy-runtests',
                            description='A command-line program that runs all '
                                        'ObsPy tests.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + get_git_version())
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode')

    # filter options
    filter = parser.add_argument_group('Module Filter',
                                       'Providing no modules will test all '
                                       'ObsPy modules which do not require an '
                                       'active network connection.')
    filter.add_argument('-a', '--all', action='store_true',
                        help='test all modules (including network modules)')
    filter.add_argument('-x', '--exclude', action='append',
                        help='exclude given module from test')
    filter.add_argument('tests', nargs='*',
                        help='test modules to run')

    # timing / profile options
    timing = parser.add_argument_group('Timing/Profile Options')
    timing.add_argument('-t', '--timeit', action='store_true',
                        help='shows accumulated run times of each module')
    timing.add_argument('-s', '--slowest', default=0, type=int, dest='n',
                        help='lists n slowest test cases')
    timing.add_argument('-p', '--profile', action='store_true',
                        help='uses cProfile, saves the results to file ' +
                             'obspy.pstats and prints some profiling numbers')

    # reporting options
    report = parser.add_argument_group('Reporting Options')
    report.add_argument('-r', '--report', action='store_true',
                        help='automatically submit a test report')
    report.add_argument('-d', '--dontask', action='store_true',
                        help="don't explicitly ask for submitting a test "
                             "report")
    report.add_argument('-u', '--server', default='tests.obspy.org',
                        help='report server (default is tests.obspy.org)')
    report.add_argument('-n', '--node', dest='hostname', default=HOSTNAME,
                        help='nodename visible at the report server')
    report.add_argument('-l', '--log', default=None,
                        help='append log file to test report')

    # other options
    others = parser.add_argument_group('Additional Options')
    others.add_argument('--tutorial', action='store_true',
                        help='add doctests in tutorial')
    others.add_argument('--no-flake8', action='store_true',
                        help='skip code formatting test')
    others.add_argument('--keep-images', action='store_true',
                        help='store images created during image comparison '
                             'tests in subfolders of baseline images')
    others.add_argument('--keep-only-failed-images', action='store_true',
                        help='when storing images created during testing, '
                             'only store failed images and the corresponding '
                             'diff images (but not images that passed the '
                             'corresponding test).')

    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all='raise')
        # raise user warnings as errors
        warnings.simplefilter("error", UserWarning)
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all='print')
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # check for send report option or environmental settings
    if args.report or 'OBSPY_REPORT' in os.environ.keys():
        report = True
    else:
        report = False
    if 'OBSPY_REPORT_SERVER' in os.environ.keys():
        args.server = os.environ['OBSPY_REPORT_SERVER']
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    if args.keep_images:
        os.environ['OBSPY_KEEP_IMAGES'] = ""
    if args.keep_only_failed_images:
        os.environ['OBSPY_KEEP_ONLY_FAILED_IMAGES'] = ""
    if args.no_flake8:
        os.environ['OBSPY_NO_FLAKE8'] = ""
    return runTests(verbosity, args.tests, report, args.log, args.server,
                    args.all, args.timeit, interactive, args.n,
                    exclude=args.exclude, tutorial=args.tutorial,
                    hostname=args.hostname)
Example #9
def run(argv=None, interactive=True):
    try:
        import matplotlib

        matplotlib.use("AGG")
        if matplotlib.get_backend().upper() != "AGG":
            raise Exception()
    except:
        msg = "unable to change backend to 'AGG' (to avoid windows popping up)"
        warnings.warn(msg)

    parser = ArgumentParser(prog="obspy-runtests", description="A command-line program that runs all " "ObsPy tests.")
    parser.add_argument("-V", "--version", action="version", version="%(prog)s " + get_git_version())
    parser.add_argument("-v", "--verbose", action="store_true", help="verbose mode")
    parser.add_argument("-q", "--quiet", action="store_true", help="quiet mode")

    # filter options
    filter = parser.add_argument_group(
        "Module Filter",
        "Providing no modules will test all " "ObsPy modules which do not require an " "active network connection.",
    )
    filter.add_argument("-a", "--all", action="store_true", help="test all modules (including network modules)")
    filter.add_argument("-x", "--exclude", action="append", help="exclude given module from test")
    filter.add_argument("tests", nargs="*", help="test modules to run")

    # timing / profile options
    timing = parser.add_argument_group("Timing/Profile Options")
    timing.add_argument("-t", "--timeit", action="store_true", help="shows accumulated run times of each module")
    timing.add_argument("-s", "--slowest", default=0, type=int, dest="n", help="lists n slowest test cases")
    timing.add_argument(
        "-p",
        "--profile",
        action="store_true",
        help="uses cProfile, saves the results to file " + "obspy.pstats and prints some profiling numbers",
    )

    # reporting options
    report = parser.add_argument_group("Reporting Options")
    report.add_argument("-r", "--report", action="store_true", help="automatically submit a test report")
    report.add_argument(
        "-d", "--dontask", action="store_true", help="don't explicitly ask for submitting a test " "report"
    )
    report.add_argument("-u", "--server", default="tests.obspy.org", help="report server (default is tests.obspy.org)")
    report.add_argument("-n", "--node", dest="hostname", default=HOSTNAME, help="nodename visible at the report server")
    report.add_argument("-l", "--log", default=None, help="append log file to test report")

    # other options
    others = parser.add_argument_group("Additional Options")
    others.add_argument("--tutorial", action="store_true", help="add doctests in tutorial")
    others.add_argument("--no-flake8", action="store_true", help="skip code formatting test")
    others.add_argument(
        "--keep-images",
        action="store_true",
        help="store images created during image comparison " "tests in subfolders of baseline images",
    )
    others.add_argument(
        "--keep-only-failed-images",
        action="store_true",
        help="when storing images created during testing, "
        "only store failed images and the corresponding "
        "diff images (but not images that passed the "
        "corresponding test).",
    )

    args = parser.parse_args(argv)
    # set correct verbosity level
    if args.verbose:
        verbosity = 2
        # raise all NumPy warnings
        np.seterr(all="raise")
        # raise user warnings as errors
        warnings.simplefilter("error", UserWarning)
    elif args.quiet:
        verbosity = 0
        # ignore user and deprecation warnings
        warnings.simplefilter("ignore", DeprecationWarning)
        warnings.simplefilter("ignore", UserWarning)
        # don't ask to send a report
        args.dontask = True
    else:
        verbosity = 1
        # show all NumPy warnings
        np.seterr(all="print")
        # ignore user warnings
        warnings.simplefilter("ignore", UserWarning)
    # check for send report option or environmental settings
    if args.report or "OBSPY_REPORT" in os.environ.keys():
        report = True
    else:
        report = False
    if "OBSPY_REPORT_SERVER" in os.environ.keys():
        args.server = os.environ["OBSPY_REPORT_SERVER"]
    # check interactivity settings
    if interactive and args.dontask:
        interactive = False
    if args.keep_images:
        os.environ["OBSPY_KEEP_IMAGES"] = ""
    if args.keep_only_failed_images:
        os.environ["OBSPY_KEEP_ONLY_FAILED_IMAGES"] = ""
    if args.no_flake8:
        os.environ["OBSPY_NO_FLAKE8"] = ""
    return runTests(
        verbosity,
        args.tests,
        report,
        args.log,
        args.server,
        args.all,
        args.timeit,
        interactive,
        args.n,
        exclude=args.exclude,
        tutorial=args.tutorial,
        hostname=args.hostname,
    )
Example #10
def _createReport(ttrs, timetaken, log, server, hostname, sorted_tests):
    # import additional libraries here to speed up normal tests
    from future import standard_library

    with standard_library.hooks():
        import urllib.parse
        import http.client
    from xml.sax.saxutils import escape
    import codecs
    from xml.etree import ElementTree as etree

    timestamp = int(time.time())
    result = {"timestamp": timestamp}
    result["slowest_tests"] = [("%0.3fs" % dt, "%s" % desc) for (desc, dt) in sorted_tests[:20]]
    result["timetaken"] = timetaken
    if log:
        try:
            data = codecs.open(log, "r", encoding="UTF-8").read()
            result["install_log"] = escape(data)
        except:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result["obspy"] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except:
        installed = ""
    result["obspy"]["installed"] = installed
    for module in sorted(ALL_MODULES):
        result["obspy"][module] = {}
        if module not in ttrs:
            continue
        result["obspy"][module]["installed"] = installed
        # test results
        ttr = ttrs[module]
        result["obspy"][module]["timetaken"] = ttr.__dict__["timetaken"]
        result["obspy"][module]["tested"] = True
        result["obspy"][module]["tests"] = ttr.testsRun
        # skipped is not supported for Python < 2.7
        try:
            skipped += len(ttr.skipped)
            result["obspy"][module]["skipped"] = len(ttr.skipped)
        except AttributeError:
            skipped = ""
            result["obspy"][module]["skipped"] = ""
        tests += ttr.testsRun
        # depending on the module type, record problems as failures (network
        # modules) or as errors (all other modules)
        result["obspy"][module]["errors"] = {}
        result["obspy"][module]["failures"] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result["obspy"][module]["failures"]["f%s" % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result["obspy"][module]["failures"]["f%s" % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result["obspy"][module]["errors"]["f%s" % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result["obspy"][module]["errors"]["f%s" % (errors)] = text
                errors += 1
    # get dependencies
    result["dependencies"] = {}
    for module in DEPENDENCIES:
        temp = module.split(".")
        try:
            mod = __import__(module, fromlist=temp[1:])
            if module == "_omnipy":
                result["dependencies"][module] = mod.coreVersion()
            else:
                result["dependencies"][module] = mod.__version__
        except ImportError:
            result["dependencies"][module] = ""
    # get system / environment settings
    result["platform"] = {}
    for func in [
        "system",
        "release",
        "version",
        "machine",
        "processor",
        "python_version",
        "python_implementation",
        "python_compiler",
        "architecture",
    ]:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result["platform"][func] = temp
        except:
            result["platform"][func] = ""
    # set node name to hostname if set
    result["platform"]["node"] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result["platform"]["node"] = result["platform"]["node"].split(".")[0]
    except:
        pass
    # test results
    result["tests"] = tests
    result["errors"] = errors
    result["failures"] = failures
    result["skipped"] = skipped

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.items():
            key = key.split("(")[0].strip()
            if isinstance(value, dict):
                child = etree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, (str, native_str)):
                    etree.SubElement(doc, key).text = value
                elif isinstance(value, bytes):
                    etree.SubElement(doc, key).text = str(value, "utf-8")
                else:
                    etree.SubElement(doc, key).text = str(value)
            else:
                etree.SubElement(doc, key)

    root = etree.Element("report")
    _dict2xml(root, result)
    xml_doc = etree.tostring(root)
    print()
    # send result to report server
    params = urllib.parse.urlencode(
        {
            "timestamp": timestamp,
            "system": result["platform"]["system"],
            "python_version": result["platform"]["python_version"],
            "architecture": result["platform"]["architecture"],
            "tests": tests,
            "failures": failures,
            "errors": errors,
            "modules": len(ttrs),
            "xml": xml_doc,
        }
    )
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    conn = http.client.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect
    if response.status == 301:
        o = urllib.parse.urlparse(response.msg["location"])
        conn = http.client.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not send a test report to %s." % (server))
        print(response.reason)
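
Across the report variants, sorted_tests is only ever consumed as sorted_tests[:20] yielding (description, duration) pairs, presumably ordered slowest first since the report labels them slowest_tests. The sketch below shows one plausible way such a list could be built; the test names and timings are invented.

# hedged sketch of building sorted_tests from per-test durations (made-up data)
timings = {
    'obspy.core.tests.test_stream.TestStream.test_merge': 3.20,
    'obspy.io.mseed.tests.test_mseed.TestMSEED.test_read': 0.85,
    'obspy.signal.tests.test_filter.TestFilter.test_bandpass': 0.10,
}
sorted_tests = sorted(timings.items(), key=lambda item: item[1], reverse=True)
slowest = [("%0.3fs" % dt, "%s" % desc) for (desc, dt) in sorted_tests[:20]]
print(slowest[0])  # the 3.200s test_merge entry comes first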