Example no. 1
import copy
import time

# getLogger, getArgs, _runOnePass, _mergeDelayData, _adjustData, and
# _retrieveMeta are helpers defined elsewhere in the surrounding module.
def runOneBenchmark(info, benchmark, framework, platform, backend, reporters,
                    lock):
    assert "treatment" in info, "Treatment is missing in info"
    getLogger().info("Running {}".format(benchmark["path"]))

    minfo = copy.deepcopy(info["treatment"])
    if "shared_libs" in info:
        minfo["shared_libs"] = info["shared_libs"]
    try:
        data = _runOnePass(minfo, benchmark, framework, platform)
        meta = None
        if "control" in info and benchmark["tests"][0]["metric"] == "delay":
            cinfo = copy.deepcopy(info["control"])
            if "shared_libs" in info:
                cinfo["shared_libs"] = info["shared_libs"]
            control = _runOnePass(cinfo, benchmark, framework, platform)
            bname = benchmark["model"]["name"]
            data = _mergeDelayData(data, control, bname)
        if benchmark["tests"][0]["metric"] != "generic":
            data = _adjustData(info, data)
        meta = _retrieveMeta(info, benchmark, platform, framework, backend)

        result = {"meta": meta, "data": data}
    except Exception as e:
        # Catch all exceptions so that failure in one test does not
        # affect other tests
        getLogger().info("Exception caught when running benchmark")
        getLogger().info(e)
        data = None
    if data is None or len(data) == 0:
        name = platform.getMangledName()
        model_name = ""
        if "model" in benchmark and "name" in benchmark["model"]:
            model_name = benchmark["model"]["name"]
        commit_hash = ""
        if "commit" in info["treatment"]:
            commit_hash = info["treatment"]["commit"]
        getLogger().info("No data collected for ".format(model_name) +
                         "on {}. ".format(name) +
                         "The run may be failed for " +
                         "{}".format(commit_hash))
        return False

    with lock:
        for reporter in reporters:
            reporter.report(result)

    if "regression_commits" in info and \
            info["run_type"] == "benchmark" and \
            getArgs().local_reporter:
        from regression_detectors.regression_detectors import checkRegressions
        checkRegressions(info, platform, framework, benchmark, reporters,
                         result['meta'],
                         getArgs().local_reporter)
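    # brief fixed pause before the next benchmark run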
    time.sleep(5)
    return True
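
Both examples publish each result through the same reporter contract: any object exposing a report(result) method, invoked under a shared lock. A minimal self-contained sketch of that contract (the LogReporter stub and the result keys below are illustrative assumptions, not part of the harness):

import json
import threading

class LogReporter:
    """Illustrative stub reporter: prints each result as one JSON line."""
    def report(self, result):
        print(json.dumps(result, default=str))

# the same publishing pattern runOneBenchmark uses
lock = threading.Lock()
reporters = [LogReporter()]
result = {"meta": {"name": "example"}, "data": [{"metric": "delay"}]}
with lock:
    for reporter in reporters:
        reporter.report(result)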
Example no. 2
import copy
import gc
import time
import traceback

# getLogger, getRunStatus, setRunStatus, _runOnePass, _mergeDelayData,
# _adjustData, _retrieveMeta, and _retrieveInfo are helpers defined
# elsewhere in the surrounding module.
def runOneBenchmark(info,
                    benchmark,
                    framework,
                    platform,
                    backend,
                    reporters,
                    lock,
                    cooldown=None,
                    user_identifier=None,
                    local_reporter=None):
    assert "treatment" in info, "Treatment is missing in info"
    getLogger().info("Running {}".format(benchmark["path"]))

    status = 0
    minfo = copy.deepcopy(info["treatment"])
    mbenchmark = copy.deepcopy(benchmark)
    if "shared_libs" in info:
        minfo["shared_libs"] = info["shared_libs"]
    try:
        # invalidate CPU cache
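        # (building a ~20M-element list touches roughly 160 MB of memory,
        # evicting previously cached data; the throwaway list is then freed)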
        [1.0 for _ in range(20 << 20)]
        gc.collect()
        data = _runOnePass(minfo, mbenchmark, framework, platform)
        status = status | getRunStatus()
        meta = None
        if "control" in info:
            cinfo = copy.deepcopy(info["control"])
            if "shared_libs" in info:
                cinfo["shared_libs"] = info["shared_libs"]
            # cool down between treatment and control
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            # cooldown may be None if neither the caller nor the model set one
            if cooldown:
                time.sleep(cooldown)
            # invalidate CPU cache
            [1.0 for _ in range(20 << 20)]
            gc.collect()
            control = _runOnePass(cinfo, benchmark, framework, platform)
            status = status | getRunStatus()
            bname = benchmark["model"]["name"]
            data = _mergeDelayData(data, control, bname)
        if benchmark["tests"][0]["metric"] != "generic":
            data = _adjustData(info, data)
        meta = _retrieveMeta(info, benchmark, platform, framework, backend,
                             user_identifier)
        data = _retrieveInfo(info, data)
        result = {"meta": meta, "data": data}
    except Exception as e:
        # Catch all exceptions so that failure in one test does not
        # affect other tests
        getLogger().info("Exception caught when running benchmark")
        getLogger().info(e)
        data = None
        status = 2
        setRunStatus(status)
        getLogger().error(traceback.format_exc())

        # Set result meta and data to default values so that
        # the reporter will not try to key into a None
        result = {"meta": {}, "data": []}

    if data is None or len(data) == 0:
        name = platform.getMangledName()
        model_name = ""
        if "model" in benchmark and "name" in benchmark["model"]:
            model_name = benchmark["model"]["name"]
        commit_hash = ""
        if "commit" in info["treatment"]:
            commit_hash = info["treatment"]["commit"]
        getLogger().info("No data collected for ".format(model_name) +
                         "on {}. ".format(name) +
                         "The run may be failed for " +
                         "{}".format(commit_hash))
        return status

    with lock:
        for reporter in reporters:
            reporter.report(result)

    if "regression_commits" in info and \
            info["run_type"] == "benchmark" and local_reporter:
        from regression_detectors.regression_detectors import checkRegressions
        checkRegressions(info, platform, framework, benchmark, reporters,
                         result['meta'], local_reporter)
    return status
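
Unlike Example no. 1, which returns a boolean, Example no. 2 returns an integer status: per-pass codes from getRunStatus() are ORed in, and a caught exception forces status 2. Callers can aggregate across many benchmarks with the same bitwise OR; a minimal sketch (the statuses list is illustrative):

# each entry stands for one runOneBenchmark return value
statuses = [0, 0, 2, 0]
overall = 0
for status in statuses:
    overall |= status
print("at least one run failed" if overall & 2 else "all runs succeeded")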