Example #1
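This helper runs a single named test in the current process and terminates with that test's status. `rd`, `log`, `util`, `get_tests`, `os` and `sys` are module-level names from the surrounding RenderDoc test harness, assumed to be imported at module scope.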
def internal_run_test(test_name):
    testcases = get_tests()

    log.add_output(util.get_artifact_path("output.log.html"))

    for testclass in testcases:
        if testclass.__name__ == test_name:
            rd.InitialiseReplay(rd.GlobalEnvironment(), [])

            log.begin_test(test_name, print_header=False)

            util.set_current_test(test_name)

            try:
                instance = testclass()
                instance.invoketest(False)
                succeeded = True
            except Exception as ex:
                log.failure(ex)
                succeeded = False

            logfile = rd.GetLogFile()
            if os.path.exists(logfile):
                log.inline_file('RenderDoc log', logfile)

            log.end_test(test_name, print_footer=False)

            rd.ShutdownReplay()

            if succeeded:
                sys.exit(0)
            else:
                sys.exit(1)

    log.error("INTERNAL ERROR: Couldn't find '{}' test to run".format(test_name))
    # Exit non-zero so a missing test is never mistaken for a pass
    sys.exit(1)
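A minimal sketch of how this helper might be dispatched from a command-line entry point. The `--internal_run_test` flag name and the argparse wiring are illustrative assumptions, not the harness's actual interface:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--internal_run_test', metavar='TEST',
                    help='run a single named test in this process')
opts = parser.parse_args()

if opts.internal_run_test is not None:
    # internal_run_test() exits the process itself: 0 on success, 1 on failure
    internal_run_test(opts.internal_run_test)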
Example #2
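The top-level runner: it initialises the replay API, prepares the HTML log artifacts, registers the Vulkan layer if necessary, filters the test list against the include/exclude patterns, runs each selected test, and exits non-zero if any test failed. As in Example #1, `rd`, `log`, `util`, `testcase`, `fetch_tests`, `get_tests` and `_run_test` are names from the surrounding harness, assumed to be available at module scope.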
def run_tests(test_include: str, test_exclude: str, in_process: bool,
              slow_tests: bool, debugger: bool):
    start_time = time.time()

    rd.InitialiseReplay(rd.GlobalEnvironment(), [])

    # On Windows, disable OS error-reporting dialogs so a crashed test process can't hang the run
    if 'windll' in dir(ctypes):
        SEM_FAILCRITICALERRORS = 1
        SEM_NOGPFAULTERRORBOX = 2
        ctypes.windll.kernel32.SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX)

    # clean up artifacts and temp folder
    if os.path.exists(util.get_artifact_dir()):
        shutil.rmtree(util.get_artifact_dir(), ignore_errors=True)

    if os.path.exists(util.get_tmp_dir()):
        shutil.rmtree(util.get_tmp_dir(), ignore_errors=True)

    log.add_output(util.get_artifact_path("output.log.html"))

    for file in ['testresults.css', 'testresults.js']:
        shutil.copyfile(os.path.join(os.path.dirname(__file__), file),
                        util.get_artifact_path(file))

    log.rawprint(
        '<meta charset="utf-8"><!-- header to prevent output from being processed as html -->'
        '<body><link rel="stylesheet" type="text/css" media="all" href="testresults.css">'
        '<script src="testresults.js"></script>'
        '<script id="logoutput" type="preformatted">\n\n\n',
        with_stdout=False)

    # Normalise the platform name, treating any Windows flavour as 'win32'
    plat = os.name
    if plat == 'nt' or 'Windows' in platform.platform():
        plat = 'win32'

    log.header("Tests running for RenderDoc Version {} ({})".format(
        rd.GetVersionString(), rd.GetCommitHash()))
    log.header("On {}".format(platform.platform()))

    log.comment("plat={} git={}".format(platform.platform(),
                                        rd.GetCommitHash()))

    driver = ""

    for api in rd.GraphicsAPI:
        v = rd.GetDriverInformation(api)
        log.print("{} driver: {} {}".format(str(api), str(v.vendor),
                                            v.version))

        # Take the first version number we get, but prefer GL as it's universally available and
        # produces a nice version number & device combination
        if (api == rd.GraphicsAPI.OpenGL
                or driver == "") and v.vendor != rd.GPUVendor.Unknown:
            driver = v.version

    log.comment("driver={}".format(driver))

    log.print("Demos running from {}".format(util.get_demos_binary()))

    layerInfo = rd.VulkanLayerRegistrationInfo()
    if rd.NeedVulkanLayerRegistration(layerInfo):
        log.print("Vulkan layer needs to be registered: {}".format(
            str(layerInfo.flags)))
        log.print("My JSONs: {}, Other JSONs: {}".format(
            layerInfo.myJSONs, layerInfo.otherJSONs))

        # First try updating the layer registration without doing anything special - when running
        # automated, the system files needed for the update may already have been granted
        # user-writable permissions. If possible we register at user level.
        if layerInfo.flags & rd.VulkanLayerFlags.NeedElevation:
            rd.UpdateVulkanLayerRegistration(True)
        else:
            rd.UpdateVulkanLayerRegistration(False)

        # Check if it succeeded
        reg_needed = rd.NeedVulkanLayerRegistration(layerInfo)

        if reg_needed:
            if plat == 'win32':
                # On Windows, try to elevate; this will show a UAC prompt
                args = sys.argv.copy()
                args.append("--internal_vulkan_register")

                # Resolve any path arguments to absolute paths, since the elevated
                # process may not start in the same working directory
                for i in range(len(args)):
                    if os.path.exists(args[i]):
                        args[i] = str(Path(args[i]).resolve())

                # Under renderdoccmd, use its built-in layer registration command instead
                if 'renderdoccmd' in sys.executable:
                    args = ['vulkanlayer', '--register', '--system']

                ctypes.windll.shell32.ShellExecuteW(None, "runas",
                                                    sys.executable,
                                                    ' '.join(args), None, 1)

                time.sleep(10)  # give the elevated registration process time to finish
            else:
                log.print(
                    "Couldn't register vulkan layer properly, might need admin rights"
                )
                sys.exit(1)

        reg_needed = rd.NeedVulkanLayerRegistration(layerInfo)

        if reg_needed:
            log.print(
                "Couldn't register vulkan layer properly, might need admin rights"
            )
            sys.exit(1)

    # Tell the demos binary where to find its data files
    os.environ['RENDERDOC_DEMOS_DATA'] = util.get_data_path('demos')

    testcase.TestCase.set_test_list(fetch_tests())

    testcases = get_tests()

    include_regexp = re.compile(test_include, re.IGNORECASE)
    exclude_regexp = None
    if test_exclude != '':
        exclude_regexp = re.compile(test_exclude, re.IGNORECASE)
        log.print("Running tests matching '{}' and not matching '{}'".format(
            test_include, test_exclude))
    else:
        log.print("Running tests matching '{}'".format(test_include))

    failedcases = []
    skippedcases = []
    runcases = []

    ver = 0

    if plat == 'win32':
        try:
            ver = sys.getwindowsversion().major
            if ver == 6:
                ver = 7  # Windows 7 is 6.1
        except AttributeError:
            pass

    for testclass in testcases:
        name = testclass.__name__

        instance = testclass()

        supported, unsupported_reason = instance.check_support()

        if not supported:
            log.print("Skipping {} as {}".format(name, unsupported_reason))
            skippedcases.append(testclass)
            continue

        if not include_regexp.search(name):
            log.print("Skipping {} as it doesn't match '{}'".format(
                name, test_include))
            skippedcases.append(testclass)
            continue

        if exclude_regexp is not None and exclude_regexp.search(name):
            log.print("Skipping {} as it matches '{}'".format(
                name, test_exclude))
            skippedcases.append(testclass)
            continue

        if not slow_tests and testclass.slow_test:
            log.print(
                "Skipping {} as it is a slow test and slow tests are not enabled"
                .format(name))
            skippedcases.append(testclass)
            continue

        runcases.append((testclass, name, instance))

    for testclass, name, instance in runcases:
        # Print the header (and footer) outside the test invocation so they are always printed
        log.begin_test(name)

        util.set_current_test(name)

        def do(debugMode):
            # In-process runs invoke the test directly; otherwise _run_test() isolates it in a child process
            if in_process:
                instance.invoketest(debugMode)
            else:
                _run_test(testclass, failedcases)

        if debugger:
            do(True)
        else:
            try:
                do(False)
            except Exception as ex:
                log.failure(ex)
                failedcases.append(testclass)

        log.end_test(name)

    duration = time.time() - start_time

    if len(failedcases) > 0:
        logfile = rd.GetLogFile()
        if os.path.exists(logfile):
            log.inline_file('RenderDoc log', logfile)

    # Round once, then decompose, so the seconds field can never display as 60
    total_seconds = int(round(duration))
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)

    log.comment("total={} fail={} skip={} time={}".format(
        len(testcases), len(failedcases), len(skippedcases), duration))
    log.header(
        "Tests complete summary: {} passed out of {} run from {} total in {}:{:02}:{:02}"
        .format(
            len(runcases) - len(failedcases), len(runcases), len(testcases),
            hours, minutes, seconds))
    if len(failedcases) > 0:
        log.print("Failed tests:")
    for testclass in failedcases:
        log.print("  - {}".format(testclass.__name__))

    # Print a proper footer if we got here
    log.rawprint('\n\n\n</script>', with_stdout=False)

    rd.ShutdownReplay()

    if len(failedcases) > 0:
        sys.exit(1)

    sys.exit(0)
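A hedged usage sketch, with every argument value purely illustrative: the include/exclude strings are ordinary `re` patterns matched case-insensitively against test class names, and the function never returns because it ends in `sys.exit()`.

run_tests(test_include='.*',    # run everything...
          test_exclude='D3D',   # ...except tests whose class name matches 'D3D'
          in_process=False,     # isolate each test via _run_test() rather than invoking in-process
          slow_tests=False,     # skip tests flagged as slow_test
          debugger=False)       # catch exceptions and record failures instead of breaking into a debugger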