Code Example #1
def send_results(config, artifact_processor):
    """ Iterate over the microbenchmark results to generate a comparison
    against the histroical artifacts and send that comparrison to the 
    performance storage service """
    ret_code = 0
    for bench_name in sorted(config.benchmarks):
        filename = "{}.json".format(bench_name)
        gbench_run_results = GBenchRunResult.from_benchmark_file(filename)

        for key in sorted(gbench_run_results.benchmarks.keys()):
            result = gbench_run_results.benchmarks.get(key)
            LOG.debug("%s Result:\n%s", bench_name, result)

            comparison = artifact_processor.get_comparison_for_publish_result(
                bench_name, result, config.lax_tolerance)
            try:
                report_microbenchmark_result(config.publish_results_env,
                                             result.timestamp, config,
                                             comparison)
            except Exception as err:
                LOG.error(
                    "Error reporting results to performance storage service")
                LOG.error(err)
                ret_code = 1

    return ret_code
Code Example #2
    def validate_result(self):
        """read the results file"""

        # Make sure the file exists before we try to open it.
        # If it's not there, we'll dump out the contents of the directory to make it
        # easier to determine whether or not we are crazy when running Jenkins.
        if not os.path.exists(self.test_histogram_path):
            LOG.error("=" * 50)
            LOG.error("Directory Contents: {}".format(
                os.path.dirname(self.test_histogram_path)))
            LOG.error("\n".join(
                os.listdir(os.path.dirname(self.test_histogram_path))))
            LOG.error("=" * 50)
            msg = "Unable to find OLTP-Bench result file '{}'".format(
                self.test_histogram_path)
            raise RuntimeError(msg)

        with open(self.test_histogram_path) as oltp_result_file:
            test_result = json.load(oltp_result_file)
        unexpected_result = test_result.get("unexpected", {}).get("HISTOGRAM")
        if unexpected_result and unexpected_result.keys():
            for test in unexpected_result.keys():
                if (unexpected_result[test] != 0):
                    LOG.error(str(unexpected_result))
                    sys.exit(ErrorCode.ERROR)
        else:
            raise RuntimeError(str(unexpected_result))
Code Example #3
    def run_db(self):
        """ Start the DB server """

        # Allow ourselves to try to restart the DBMS multiple times
        for attempt in range(constants.DB_START_ATTEMPTS):
            # Kill any other terrier processes that are listening on our target port
            for other_pid in check_port(self.db_port):
                LOG.info(
                    "Killing existing server instance listening on port {} [PID={}]"
                    .format(self.db_port, other_pid))
                os.kill(other_pid, signal.SIGKILL)
            ## FOR

            self.db_output_fd = open(self.db_output_file, "w+")
            self.db_process = subprocess.Popen(self.db_path,
                                               stdout=self.db_output_fd,
                                               stderr=self.db_output_fd)
            try:
                self.wait_for_db()
                break
            except:
                self.stop_db()
                LOG.error("+" * 100)
                LOG.error("DATABASE OUTPUT")
                self.print_output(self.db_output_file)
                if attempt + 1 == constants.DB_START_ATTEMPTS:
                    raise
                traceback.print_exc(file=sys.stdout)
                pass
        ## FOR
        return
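The check_port helper used by run_db is not shown in these snippets. A minimal sketch of what a compatible helper might look like, assuming psutil is available; the function name and the yield-of-PIDs behavior come from the call site above, everything else is an assumption:

import psutil

def check_port(port):
    """ Hypothetical sketch: yield the PID of every process that has a
    TCP socket listening on the given port. """
    for conn in psutil.net_connections(kind="tcp"):
        if conn.status == psutil.CONN_LISTEN and conn.laddr.port == int(port) and conn.pid:
            yield conn.pid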
Code Example #4
 def download_oltp(self):
     rc, stdout, stderr = run_command(
         constants.OLTP_GIT_COMMAND,
         "Error: unable to git clone OLTP source code")
     if rc != ErrorCode.SUCCESS:
         LOG.error(stderr)
         sys.exit(rc)
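run_command is a project helper that is not reproduced here. Based on how the callers read its return value, a compatible helper might look roughly like the sketch below; the exact signature, keyword arguments, and logging behavior of the real implementation are assumptions:

import shlex
import subprocess

def run_command(command, error_msg="", cwd=None):
    """ Hypothetical sketch: run a command and return
    (return_code, stdout_pipe, stderr_pipe), matching how the callers use it. """
    p = subprocess.Popen(shlex.split(command),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         cwd=cwd)
    p.wait()
    if p.returncode != 0 and error_msg:
        LOG.debug(error_msg)
    return p.returncode, p.stdout, p.stderr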
Code Example #5
 def build_oltp(self):
     for command in constants.OLTP_ANT_COMMANDS:
         error_msg = "Error: unable to run \"{}\"".format(command)
         rc, stdout, stderr = run_command(command, error_msg)
         if rc != ErrorCode.SUCCESS:
             LOG.error(stderr)
             sys.exit(rc)
Code Example #6
    def run_single_benchmark(self, bench_name, enable_perf):
        """ Execute a single benchmark. The results will be stored in a JSON
        file and an XML file. 
        
        Parameters
        ----------
        bench_name : str
            The name of the benchmark to run.
        enable_perf : bool
            Whether perf should be enabled for all the benchmarks.
        
        Returns
        -------
        ret_val : int
            The return value from the benchmark process. 0 if successful.
        """
        output_file = "{}.json".format(bench_name)
        cmd = self._build_benchmark_cmd(bench_name, output_file, enable_perf)

        # Environment Variables
        os.environ["TERRIER_BENCHMARK_THREADS"] = str(
            self.config.num_threads)  # has to be a str
        os.environ["TERRIER_BENCHMARK_LOGFILE_PATH"] = self.config.logfile_path

        ret_val, err = self._execute_benchmark(cmd)

        if ret_val == 0:
            convert_result_xml(bench_name, output_file)
        else:
            LOG.error(
                f'Unexpected failure of {bench_name} [ret_val={ret_val}]')
            LOG.error(err)

        # return the process exit code
        return ret_val
Code Example #7
File: test_oltpbench.py  Project: eppingere/terrier
 def clean_oltp(self):
     rc, stdout, stderr = run_command(constants.OLTPBENCH_GIT_CLEAN_COMMAND,
                                      "Error: unable to clean OLTP repo")
     if rc != ErrorCode.SUCCESS:
         LOG.info(stdout.read())
         LOG.error(stderr.read())
         sys.exit(rc)
Code Example #8
    def wait_for_db(self):
        """ Wait for the db server to come up """

        # Check that PID is running
        check_db_process_exists(self.db_process.pid)

        # Wait a bit before checking if we can connect to give the system time to setup
        time.sleep(constants.DB_START_WAIT)

        # flag to check if the db is running
        is_db_running = False

        attempt_number = 0
        # Keep trying to connect to the DBMS until we run out of attempts or we succeed
        for i in range(constants.DB_CONNECT_ATTEMPTS):
            attempt_number = i + 1
            is_db_running = check_db_running(self.db_host, self.db_port)
            if is_db_running:
                break
            else:
                if attempt_number % 20 == 0:
                    LOG.error(
                        "Failed to connect to DB server [Attempt #{}/{}]".
                        format(attempt_number, constants.DB_CONNECT_ATTEMPTS))
                    # os.system('ps aux | grep terrier | grep {}'.format(self.db_process.pid))
                    # os.system('lsof -i :15721')
                    traceback.print_exc(file=sys.stdout)
                time.sleep(constants.DB_CONNECT_SLEEP)

        handle_db_connection_status(is_db_running,
                                    attempt_number,
                                    db_pid=self.db_process.pid)
        return
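check_db_running is not shown here, but the older wait_for_db in Code Example #22 performs the equivalent check inline with a raw socket. A sketch along the same lines, with the name and parameters taken from the call above and the body an assumption:

import socket

def check_db_running(db_host, db_port):
    """ Hypothetical sketch: return True if a TCP connection to the DBMS
    host/port can be opened, False otherwise. """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((db_host, int(db_port)))
        s.close()
        return True
    except OSError:
        return False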
Code Example #9
File: db_server.py  Project: eppingere/terrier
    def execute(self,
                sql,
                autocommit=True,
                expect_result=True,
                user=DEFAULT_DB_USER):
        """
        Open a connection to the DB and execute a SQL statement.

        WARNING: this is a really simple (and barely thought-through) client execution interface. Users might need to
        extend this with more arguments or error checking. Only the SET SQL command has been tested.

        :param sql: SQL to be executed
        :param autocommit: Whether the connection should be set to autocommit. For SQL that should not run in a
            transaction, this should be set to True, e.g. SET XXX
        :param expect_result: True if result rows should be fetched and returned
        :param user: User of this connection
        :return: None if error or not expecting results, rows fetched when expect_result is True
        """
        try:
            with psql.connect(port=self.db_port, host=self.db_host,
                              user=user) as conn:
                conn.set_session(autocommit=autocommit)
                with conn.cursor() as cursor:
                    cursor.execute(sql)

                    if expect_result:
                        rows = cursor.fetchall()
                        return rows

                    return None
        except Exception as e:
            LOG.error(f"Executing SQL = {sql} failed: ")
            # Re-raise this
            raise e
Code Example #10
    def run_benchmarks(self, enable_perf):
        """ Return 0 if all benchmarks succeed, otherwise return the error code
            code from the last benchmark to fail
        """
        if not len(self.config.benchmarks):
            LOG.error("Invlid benchmarks were specified to execute. \
                Try not specifying a benchmark and it will execute all.")
            return 0

        ret_val = 0
        benchmark_fail_count = 0

        # iterate over all benchmarks and run them
        for benchmark_count, bench_name in enumerate(self.config.benchmarks):
            LOG.info("Running '{}' with {} threads [{}/{}]".format(
                bench_name, self.config.num_threads, benchmark_count,
                len(self.config.benchmarks)))
            benchmark_ret_val = self.run_single_benchmark(
                bench_name, enable_perf)
            if benchmark_ret_val:
                ret_val = benchmark_ret_val
                benchmark_fail_count += 1

        LOG.info("{PASSED}/{TOTAL} benchmarks passed".format(
            PASSED=len(self.config.benchmarks) - benchmark_fail_count,
            TOTAL=len(self.config.benchmarks)))

        return ret_val
Code Example #11
File: common.py  Project: ubi23/noisepage
def print_file(filename):
    """ Print out contents of a file """
    try:
        with open(filename) as file:
            lines = file.readlines()
            for line in lines:
                LOG.info(line.strip())
    except FileNotFoundError:
        LOG.error("file not exists: '{}'".format(filename))
Code Example #12
File: test_server.py  Project: simondgt/noisepage
 def handle_test_suite_result(self, test_suite_result):
     """	
     Determine what to do based on the result. If continue_on_error is	
     True then it will mask any errors and return success. Otherwise,	
     it will return the result of the test suite.	
     """
     if test_suite_result is None or test_suite_result != constants.ErrorCode.SUCCESS:
         LOG.error("The test suite failed")
     return test_suite_result
Code Example #13
def gen_oltp_trace(tpcc_weight: str, tpcc_rates: List[int],
                   pattern_iter: int) -> bool:
    """
    Generates the trace by running OLTP TPCC benchmark on the built database
    :param tpcc_weight:  Weight for the TPCC workload
    :param tpcc_rates: Arrival rates for each phase in a pattern
    :param pattern_iter:  Number of patterns
    :return: True when data generation succeeds
    """
    # Remove the old query_trace/query_text.csv
    Path(DEFAULT_QUERY_TRACE_FILE).unlink(missing_ok=True)

    # Server is running when this returns
    oltp_server = TestOLTPBench(DEFAULT_OLTP_SERVER_ARGS)
    db_server = oltp_server.db_instance
    db_server.run_db()

    # Download the OLTP repo and build it
    oltp_server.run_pre_suite()

    # Load the workload pattern - based on the tpcc.json in
    # testing/oltpbench/config
    test_case_config = DEFAULT_OLTP_TEST_CASE
    test_case_config["weights"] = tpcc_weight
    test_case = TestCaseOLTPBench(test_case_config)

    # Prep the test case and build the result dir
    test_case.run_pre_test()

    rates = tpcc_rates * pattern_iter
    config_forecast_data(test_case.xml_config, rates)

    # Turn on query trace metrics tracing
    db_server.execute("SET query_trace_metrics_enable='true'",
                      expect_result=False)

    # Run the actual test
    ret_val, _, stderr = run_command(test_case.test_command,
                                     test_case.test_error_msg,
                                     cwd=test_case.test_command_cwd)
    if ret_val != ErrorCode.SUCCESS:
        LOG.error(stderr)
        return False

    # Clean up, disconnect the DB
    db_server.stop_db()
    db_server.delete_wal()

    if not Path(DEFAULT_QUERY_TRACE_FILE).exists():
        LOG.error(
            f"Missing {DEFAULT_QUERY_TRACE_FILE} at CWD after running OLTP TPCC"
        )
        return False

    return True
Code Example #14
def send_result(env, path, username, password, result):
    """ Send the results to the performance storage service. If the service
    responds with an error code this will raise an error.
     """
    LOG.debug("Sending request to {PATH}".format(PATH=path))
    base_url = get_base_url(env)
    try:
        response = requests.post(base_url + path, json=result, auth=(username, password))
        response.raise_for_status()
    except Exception as err:
        LOG.error(err.response.text)
        raise
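get_base_url is not included in these snippets. It presumably maps the publish environment name to the performance storage service's base URL; a minimal sketch, assuming the mapping lives in the project's constants module (the constant name here is hypothetical):

def get_base_url(environment):
    """ Hypothetical sketch: resolve the performance storage service base URL
    for the given environment (e.g. a test, staging, or prod name). """
    return constants.PERFORMANCE_STORAGE_SERVICE_API[environment]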
Code Example #15
 def get_build_data(self, folders, project, branch, build_number=1):
     """ Returns a dict with details about the Jenkins build. This contains
     information about the artifacts associtaed with the build """
     build_url = "{BASE_URL}/{JOB_PATH}/{BUILD_NUM}/api/json".format(
         BASE_URL=self.base_url, JOB_PATH=create_job_path(folders, project, branch), BUILD_NUM=build_number)
     try:
         LOG.debug(
             "Retrieving Jenkins JSON build data from {URL}".format(URL=build_url))
         response = requests.get(build_url)
         response.raise_for_status()
         return response.json()
     except Exception as err:
         LOG.error("Unexpected error when retrieving Jenkins build data")
         LOG.error(err)
         raise
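create_job_path is also not shown. Jenkins nests folders, projects, and branches under repeated "job/" segments in its URLs, so a plausible sketch is below; the signature matches the call sites above, the body is an assumption:

def create_job_path(folders, project, branch):
    """ Hypothetical sketch: build the 'job/<folder>/job/<project>/job/<branch>'
    path segment that Jenkins uses for nested jobs. """
    parts = list(folders) + [project, branch]
    return "/".join("job/{}".format(part) for part in parts)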
Code Example #16
 def get_artifact_data(self, folders, project, branch, build_number, artifact_path):
     """ Returns a dict with details about the Jenkins artifact. The
     contents of the artifact must be JSON for this to work. """
     artifact_url = "{BASE_URL}/{JOB_PATH}/{BUILD_NUM}/artifact/{ARTIFACT}".format(BASE_URL=self.base_url,
                                                                                   JOB_PATH=create_job_path(folders, project, branch), BUILD_NUM=build_number, ARTIFACT=artifact_path)
     try:
         LOG.debug("Retrieving JSON artifact data from {URL}".format(
             URL=artifact_url))
         response = requests.get(artifact_url)
         response.raise_for_status()
         return response.json()
     except Exception as err:
         LOG.error("Unexpected error when retrieving Jenkins artifact")
         LOG.error(err)
         raise
Code Example #17
def handle_db_connection_status(is_db_running, attempt_number, db_pid):
    """
    Based on whether the DBMS is running and whether the db_pid exists this
    will print the appropriate message or throw an error.
    """
    if not is_db_running:
        LOG.error(
            "Failed to connect to DB server [Attempt #{ATTEMPT}/{TOTAL_ATTEMPTS}]"
            .format(ATTEMPT=attempt_number,
                    TOTAL_ATTEMPTS=constants.DB_CONNECT_ATTEMPTS))
        check_db_process_exists(db_pid)
        raise RuntimeError('Unable to connect to DBMS.')
    else:
        LOG.info("Connected to server in {} seconds [PID={}]".format(
            attempt_number * constants.DB_CONNECT_SLEEP, db_pid))
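check_db_process_exists is referenced in Code Examples #8 and #17 but not defined here. A minimal sketch, assuming psutil is available; the logging behavior is an assumption:

import psutil

def check_db_process_exists(db_pid):
    """ Hypothetical sketch: log whether the DBMS process is still alive so
    a connection failure message carries more context. """
    if psutil.pid_exists(db_pid):
        LOG.info("DBMS process is still running [PID={}]".format(db_pid))
    else:
        LOG.error("DBMS process is not running [PID={}]".format(db_pid))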
Code Example #18
 def get_job_data(self, folders, project, branch):
     """ Returns a dict with details about the Jenkins job. This contains
     information about the builds associtaed with the job """
     job_url = "{BASE_URL}/{JOB_PATH}/api/json".format(
         BASE_URL=self.base_url, JOB_PATH=create_job_path(folders, project, branch))
     try:
         LOG.debug(
             "Retrieving list of Jenkins builds from {URL}".format(URL=job_url))
         response = requests.get(job_url)
         response.raise_for_status()
         return response.json()
     except Exception as err:
         LOG.error("Unexpected error when retrieving list of Jenkins builds")
         LOG.error(err)
         raise
Code Example #19
 def create_and_load_db(self):
     """
     Create the database and load the data before the actual test execution.
     """
     cmd = "{BIN} -c {XML} -b {BENCHMARK} --create={CREATE} --load={LOAD}".format(
         BIN=constants.OLTPBENCH_DEFAULT_BIN,
         XML=self.xml_config,
         BENCHMARK=self.benchmark,
         CREATE=self.db_create,
         LOAD=self.db_load)
     error_msg = "Error: unable to create and load the database"
     rc, stdout, stderr = run_command(cmd,
                                      error_msg=error_msg,
                                      cwd=self.test_command_cwd)
     if rc != ErrorCode.SUCCESS:
         LOG.error(stderr)
         raise RuntimeError(error_msg)
Code Example #20
File: common.py  Project: ubi23/noisepage
def run_check_pids(pid):
    """ 
    Fork a subprocess with sudo privilege to check if the given pid exists,
    because psutil requires sudo privilege.
    """

    cmd = "python3 {SCRIPT} {PID}".format(SCRIPT=constants.FILE_CHECK_PIDS,
                                          PID=pid)
    rc, stdout, _ = run_as_root(cmd, printable=False)

    if rc != constants.ErrorCode.SUCCESS:
        LOG.error(
            "Error occurred in run_check_pid_exists for [PID={}]".format(pid))
        return False

    res_str = stdout.readline().decode("utf-8").rstrip("\n")
    return res_str == constants.CommandLineStr.TRUE
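run_as_root is the sudo counterpart of run_command and is not reproduced in these snippets. A rough sketch of a compatible helper; the printable flag is accepted but its behavior is omitted here, and everything beyond the call-site signature is an assumption:

import shlex
import subprocess

def run_as_root(command, printable=True):
    """ Hypothetical sketch: run the command under sudo and return
    (return_code, stdout_pipe, stderr_pipe), like run_command does. """
    p = subprocess.Popen(shlex.split("sudo {}".format(command)),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    p.wait()
    return p.returncode, p.stdout, p.stderr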
Code Example #21
def parse_db_metadata():
    """ Lookup the DB version from the version.h file. This is error prone
    due to it being reliant on file location and no tests that will fail
    if the version.h moves and this is not updated. We use a fallback of
    unknown result so the tests won't fail if this happens"""
    regex = r"NOISEPAGE_VERSION[=\s].*(\d.\d.\d)"
    CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
    db_metadata = {'noisepage': {'db_version': UNKNOWN_RESULT}}
    try:
        with open(CURRENT_DIR +
                  '/../../../../src/include/common/version.h') as version_file:
            match = re.search(regex, version_file.read())
            db_metadata['noisepage']['db_version'] = match.group(1)
    except Exception as err:
        LOG.error(err)

    return db_metadata
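For reference, the regex above expects a version.h line shaped roughly like the one in this small self-contained check (the sample line is an assumption about the file's contents, not a quote from it):

import re

# Hypothetical version.h line the regex is meant to match.
sample = '#define NOISEPAGE_VERSION "1.2.3"'
match = re.search(r"NOISEPAGE_VERSION[=\s].*(\d.\d.\d)", sample)
print(match.group(1))  # -> 1.2.3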
Code Example #22
    def wait_for_db(self):
        """ Wait for the db server to come up """

        # Check that PID is running
        if not check_pid(self.db_process.pid):
            raise RuntimeError("Unable to find DBMS PID {}".format(
                self.db_process.pid))

        # Wait a bit before checking if we can connect to give the system time to setup
        time.sleep(constants.DB_START_WAIT)

        # flag to check if the db is running
        is_db_running = False

        # Keep trying to connect to the DBMS until we run out of attempts or we succeed
        for i in range(constants.DB_CONNECT_ATTEMPTS):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.connect((self.db_host, int(self.db_port)))
                s.close()
                LOG.info("Connected to server in {} seconds [PID={}]".format(
                    i * constants.DB_CONNECT_SLEEP, self.db_process.pid))
                is_db_running = True
                break
            except:
                if i > 0 and i % 20 == 0:
                    LOG.error(
                        "Failed to connect to DB server [Attempt #{}/{}]".
                        format(i, constants.DB_CONNECT_ATTEMPTS))
                    # os.system('ps aux | grep terrier | grep {}'.format(self.db_process.pid))
                    # os.system('lsof -i :15721')
                    traceback.print_exc(file=sys.stdout)
                time.sleep(constants.DB_CONNECT_SLEEP)
                continue

        if not is_db_running:
            msg = "Unable to connect to DBMS [PID={} / {}]"
            status = "RUNNING"
            if not check_pid(self.db_process.pid):
                status = "NOT RUNNING"
            msg = msg.format(self.db_process.pid, status)
            raise RuntimeError(msg)
        return
Code Example #23
File: parse_data.py  Project: eppingere/terrier
def parse_db_metadata():
    """ Lookup the DB version from the version.h file. This is error prone
    due to it being reliant on file location and no tests that will fail
    if the version.h moves and this is not updated. We use a fallback of
    unknown result so the tests won't fail if this happens"""
    regex = r"NOISEPAGE_VERSION[=\s].*(\d.\d.\d)"
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    # FIXME: The relative path for the version.h may change in the future
    version_file_relative = '../../../../src/include/common/version.h'
    version_file = os.path.join(curr_dir, version_file_relative)
    db_metadata = {'noisepage': {'db_version': UNKNOWN_RESULT}}
    try:
        with open(version_file) as f:
            match = re.search(regex, f.read())
            db_metadata['noisepage']['db_version'] = match.group(1)
    except Exception as err:
        LOG.error(err)

    return db_metadata
Code Example #24
def collect_artifact_stats(collectors):
    """ Takes an array of collector classes, executes the collectors and 
    combines the result.
    Args:
        collectors - An array of BaseArtifactStatsCollector sub classes
    Returns:
        exit_code - (int)The exit code of the collection task
        metrics - (dict)The combined metrics from all the collectors
    """
    aggregated_metrics = {}
    exit_code = 0
    try:
        for collector in collectors:
            collector_instance = collector(is_debug=args.debug)
            LOG.info(f'Starting {collector_instance.__class__.__name__} collection')
            try:
                exit_code, results = run_collector(collector_instance)
                check_for_conflicting_metric_keys(aggregated_metrics, results)
                aggregated_metrics.update(results)
            except Exception as err:
                exit_code = 1 if exit_code == 0 else exit_code
                LOG.error(err)
                collector_instance.teardown()
            if exit_code:
                LOG.error(f'{collector_instance.__class__.__name__} failed. Stopping all artifact stats collection')
                break
            LOG.info(f'{collector_instance.__class__.__name__} finished successfully')
    except Exception as err:
        exit_code = 1 if exit_code == 0 else exit_code
        LOG.error(err)

    return exit_code, aggregated_metrics
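run_collector and check_for_conflicting_metric_keys are project helpers not shown here. A minimal sketch of what the key-conflict check plausibly does, under the assumption that two collectors must not report the same metric name:

def check_for_conflicting_metric_keys(aggregated_metrics, new_metrics):
    """ Hypothetical sketch: raise if a collector reports a metric key that
    an earlier collector has already reported. """
    conflicts = set(aggregated_metrics) & set(new_metrics)
    if conflicts:
        raise KeyError("Conflicting metric keys: {}".format(sorted(conflicts)))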
Code Example #25
    def run_single_benchmark(self, bench_name, enable_perf):
        """ Execute a single benchmark. The results will be stored in a JSON
        file and an XML file. """
        output_file = "{}.json".format(bench_name)
        cmd = self._build_benchmark_cmd(bench_name, output_file, enable_perf)

        # Environment Variables
        os.environ["TERRIER_BENCHMARK_THREADS"] = str(
            self.config.num_threads)  # has to be a str
        os.environ["TERRIER_BENCHMARK_LOGFILE_PATH"] = self.config.logfile_path

        ret_val, err = self._execute_benchmark(cmd)

        if ret_val == 0:
            convert_result_xml(bench_name, output_file)
        else:
            LOG.error("Unexpected failure of {BENCHMARK} [ret_val={RET_CODE}]".
                      format(BENCHMARK=bench_name, RET_CODE=ret_val))
            LOG.error(err)

        # return the process exit code
        return ret_val
Code Example #26
File: common.py  Project: ubi23/noisepage
def run_collect_mem_info(pid):
    """ 
    Fork a subprocess with sudo privilege to collect the memory info for the
    given pid.
    """

    cmd = "python3 {SCRIPT} {PID}".format(
        SCRIPT=constants.FILE_COLLECT_MEM_INFO, PID=pid)

    rc, stdout, _ = run_as_root(cmd, printable=False)

    if rc != constants.ErrorCode.SUCCESS:
        LOG.error(
            "Error occurred in run_collect_mem_info for [PID={}]".format(pid))
        return False

    res_str = stdout.readline().decode("utf-8").rstrip("\n")
    rss, vms = res_str.split(constants.MEM_INFO_SPLITTER)
    rss = int(rss) if rss else None
    vms = int(vms) if vms else None
    mem_info = MemoryInfo(rss, vms)
    return mem_info
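MemoryInfo is constructed above but not defined in these snippets; a likely minimal definition is a small named tuple (this exact definition is an assumption):

from collections import namedtuple

# Hypothetical definition: resident set size and virtual memory size.
MemoryInfo = namedtuple("MemoryInfo", ["rss", "vms"])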
Code Example #27
    def run_benchmarks(self, enable_perf):
        """ Runs all the microbenchmarks.
        
        Parameters
        ----------
        enable_perf : bool
            Whether perf should be enabled for all the benchmarks.

        Returns
        -------
        ret_val : int
            the return value for the last failed benchmark. If no benchmarks
            fail then it will return 0.
        """
        if not len(self.config.benchmarks):
            LOG.error('Invalid benchmarks were specified to execute. \
                Try not specifying a benchmark and it will execute all.')
            return 0

        ret_val = 0
        benchmark_fail_count = 0

        # iterate over all benchmarks and run them
        for benchmark_count, bench_name in enumerate(self.config.benchmarks):
            LOG.info(
                f"Running '{bench_name}' with {self.config.num_threads} threads [{benchmark_count}/{len(self.config.benchmarks)}]"
            )
            benchmark_ret_val = self.run_single_benchmark(
                bench_name, enable_perf)
            if benchmark_ret_val:
                ret_val = benchmark_ret_val
                benchmark_fail_count += 1

        LOG.info("{PASSED}/{TOTAL} benchmarks passed".format(
            PASSED=len(self.config.benchmarks) - benchmark_fail_count,
            TOTAL=len(self.config.benchmarks)))

        return ret_val
Code Example #28
    def run_db(self):
        """ Start the DB server """

        # Allow ourselves to try to restart the DBMS multiple times
        for attempt in range(constants.DB_START_ATTEMPTS):
            # Kill any other terrier processes that are listening on our target port
            kill_processes_listening_on_db_port(self.db_port)

            self.db_output_fd, self.db_process = start_db(
                self.db_path, self.db_output_file)
            try:
                self.wait_for_db()
                break
            except:
                self.stop_db()
                LOG.error("+" * 100)
                LOG.error("DATABASE OUTPUT")
                print_output(self.db_output_file)
                if attempt + 1 == constants.DB_START_ATTEMPTS:
                    raise
                traceback.print_exc(file=sys.stdout)
                pass
        # FOR
        return
Code Example #29
File: common.py  Project: ubi23/noisepage
def print_pipe(p):
    """ Print out the memory buffer of subprocess pipes """
    try:
        stdout, stderr = p.communicate()
        if stdout:
            for line in stdout.decode("utf-8").rstrip("\n").split("\n"):
                LOG.info(line)
        if stderr:
            for line in stderr.decode("utf-8").rstrip("\n").split("\n"):
                LOG.error(line)
    except ValueError:
        # This is a dirty workaround
        LOG.error("Error in subprocess communicate")
        LOG.error(
            "Known issue in CPython https://bugs.python.org/issue35182. Please upgrade the Python version."
        )
Code Example #30
 def from_benchmark_file(cls, result_file_name):
     """ Create a GBenchRunResult by passing it a json file generated by
     Google benchmark """
     LOG.debug(
         "Loading local benchmark result file {}".format(result_file_name))
     with open(result_file_name) as result_file:
         try:
             raw_results = result_file.read()
             results = json.loads(raw_results)
             return cls(results)
         except Exception as err:
             LOG.error(
                 "Invalid data read from benchmark result file {}".format(
                     result_file_name))
             LOG.error(err)
             LOG.error(raw_results)
             raise
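A short usage sketch tying this constructor back to the loop in Code Example #1; the file name is a placeholder:

# Hypothetical usage: load one Google Benchmark result file and log its entries.
run_result = GBenchRunResult.from_benchmark_file("tpcc_benchmark.json")
for key in sorted(run_result.benchmarks.keys()):
    LOG.debug("%s -> %s", key, run_result.benchmarks.get(key))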