def execute_telemetry_benchmark(
    command_generator, output_paths, use_xvfb=False):
  start = time.time()

  env = os.environ.copy()
  env['CHROME_HEADLESS'] = '1'
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

  return_code = 1
  temp_dir = tempfile.mkdtemp('telemetry')
  try:
    command = command_generator.generate(temp_dir)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env=env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        return_code = test_env.run_command_output_to_handle(
            command, handle, env=env)
    expected_results_filename = os.path.join(temp_dir, 'test-results.json')
    if os.path.exists(expected_results_filename):
      shutil.move(expected_results_filename, output_paths.test_results)
    else:
      common.write_interrupted_test_results_to(output_paths.test_results,
                                               start)
    expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
    shutil.move(expected_perf_filename, output_paths.perf_results)

    csv_file_path = os.path.join(temp_dir, 'results.csv')
    if os.path.isfile(csv_file_path):
      shutil.move(csv_file_path, output_paths.csv_perf_results)
  except Exception:
    print('The following exception may have prevented the code from '
          'outputting structured test results and perf results output:')
    print(traceback.format_exc())
  finally:
    # Add ignore_errors=True because otherwise rmtree may fail when leaky
    # test processes are still holding open handles to files under
    # |temp_dir|. For example, see crbug.com/865896.
    shutil.rmtree(temp_dir, ignore_errors=True)

  print_duration('executing benchmark %s' % command_generator.benchmark, start)

  # Telemetry sets the exit code to -1 to indicate that no stories were run.
  # This becomes 255 on Linux because Linux does not support -1: the value is
  # taken modulo 256, and -1 % 256 == 255.
  # TODO(crbug.com/1019139): Make 111 be the exit code that means
  # "no stories were run.".
  if return_code in (111, -1, 255):
    print('Exit code %s indicates that no stories were run, so we are marking '
          'this as a success.' % return_code)
    return 0
  if return_code:
    return return_code
  return 0
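# Illustrative sketch (not used by the script): the exit-code special casing
# above, factored into a standalone helper so the mapping is easier to see.
# The function name is hypothetical.
def _interpret_telemetry_exit_code(return_code):
  """Maps Telemetry's "no stories were run" exit codes to success (0)."""
  # Telemetry reports -1, which wraps to 255 on Linux (-1 % 256 == 255);
  # 111 is the planned explicit code per crbug.com/1019139.
  if return_code in (111, -1, 255):
    return 0
  return return_code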
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  env['CHROME_HEADLESS'] = '1'

  return_code = 0
  try:
    command = command_generator.generate(output_paths.benchmark_path)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(command, env,
                                        stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        try:
          return_code = test_env.run_command_output_to_handle(
              command, handle, env=env)
        except OSError as e:
          print('Command to run gtest perf test %s failed with an OSError: %s'
                % (output_paths.name, e))
          return_code = 1
    if (not os.path.exists(output_paths.perf_results) and
        os.path.exists(output_paths.logs)):
      # Get the correct json format from the stdout to write to the perf
      # results file if gtest does not generate one.
      results_processor = generate_legacy_perf_dashboard_json.\
          LegacyResultsProcessor()
      graph_json_string = results_processor.GenerateJsonResults(
          output_paths.logs)
      with open(output_paths.perf_results, 'w') as fh:
        fh.write(graph_json_string)
  except Exception:
    traceback.print_exc()
    return_code = 1
  if os.path.exists(output_paths.perf_results):
    if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
      with path_util.SysPath(path_util.GetTracingDir()):
        # pylint: disable=no-name-in-module
        from tracing.value import gtest_json_converter
        # pylint: enable=no-name-in-module
        gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results)
  else:
    print('ERROR: gtest perf test %s did not generate perf output' %
          output_paths.name)
    return_code = 1
  write_simple_test_results(return_code, output_paths.test_results,
                            output_paths.name)
  return return_code
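# Illustrative sketch (not part of the script): the log-to-legacy-JSON
# fallback used above, factored into a standalone helper. The helper name is
# hypothetical; it reuses only calls that already appear in the function
# above.
def _write_legacy_perf_results_from_log(log_path, perf_results_path):
  """Parses gtest stdout saved at |log_path| into legacy graph JSON."""
  results_processor = (
      generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
  graph_json_string = results_processor.GenerateJsonResults(log_path)
  with open(perf_results_path, 'w') as fh:
    fh.write(graph_json_string)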
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  env['CHROME_HEADLESS'] = '1'

  return_code = 0
  try:
    command = command_generator.generate(output_paths.benchmark_path)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(command, env,
                                        stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        return_code = test_env.run_command_output_to_handle(command, handle,
                                                            env=env)
    if not os.path.exists(output_paths.perf_results):
      # Get the correct json format from the stdout to write to the perf
      # results file if gtest does not generate one.
      results_processor = generate_legacy_perf_dashboard_json.\
          LegacyResultsProcessor()
      graph_json_string = results_processor.GenerateJsonResults(
          output_paths.logs)
      with open(output_paths.perf_results, 'w') as fh:
        fh.write(graph_json_string)
  except Exception:
    traceback.print_exc()
    return_code = 1
  write_legacy_test_results(return_code, output_paths.test_results)
  return return_code
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False,
                            is_unittest=False):
  start = time.time()

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  env['CHROME_HEADLESS'] = '1'
  # TODO(crbug/1138988): Some gtests do not implement unit_test_launcher.cc.
  # As a result, they will not respect the arguments added by
  # _generate_shard_args() and will still use the values of GTEST_SHARD_INDEX
  # and GTEST_TOTAL_SHARDS to run part of the tests.
  # Removing those environment variables as a workaround.
  if command_generator._ignore_shard_env_vars:
    if 'GTEST_TOTAL_SHARDS' in env:
      env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
      env.pop('GTEST_SHARD_INDEX')

  return_code = 0
  try:
    command = command_generator.generate(output_paths.benchmark_path)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        try:
          return_code = test_env.run_command_output_to_handle(
              command, handle, env=env)
        except OSError as e:
          print('Command to run gtest perf test %s failed with an OSError: %s'
                % (output_paths.name, e))
          return_code = 1
    if (not os.path.exists(output_paths.perf_results) and
        os.path.exists(output_paths.logs)):
      # Get the correct json format from the stdout to write to the perf
      # results file if gtest does not generate one.
      results_processor = generate_legacy_perf_dashboard_json.\
          LegacyResultsProcessor()
      graph_json_string = results_processor.GenerateJsonResults(
          output_paths.logs)
      with open(output_paths.perf_results, 'w') as fh:
        fh.write(graph_json_string)
  except Exception:
    traceback.print_exc()
    return_code = 1
  if os.path.exists(output_paths.perf_results):
    if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
      with path_util.SysPath(path_util.GetTracingDir()):
        # pylint: disable=no-name-in-module
        from tracing.value import gtest_json_converter
        # pylint: enable=no-name-in-module
        gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results)
  else:
    print('ERROR: gtest perf test %s did not generate perf output' %
          output_paths.name)
    return_code = 1
  write_simple_test_results(return_code, output_paths.test_results,
                            output_paths.name)
  if not is_unittest:
    upload_simple_test_results(return_code, output_paths.name)

  print_duration(
      'executing gtest %s' % command_generator.executable_name, start)

  return return_code
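# Illustrative sketch (not used above): the GTEST sharding environment
# variable workaround from crbug/1138988 as a standalone helper, for gtests
# that do not implement unit_test_launcher.cc. The helper name is
# hypothetical.
def _strip_gtest_shard_env_vars(env):
  """Returns a copy of |env| without the GTEST sharding variables."""
  cleaned = dict(env)
  cleaned.pop('GTEST_TOTAL_SHARDS', None)
  cleaned.pop('GTEST_SHARD_INDEX', None)
  return cleaned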