def execute_cmd(self):
    curdir = os.getcwd()
    os.chdir(self.fabtestconfigpath)
    command = self.cmd + self.options
    outputcmd = shlex.split(command)
    common.run_command(outputcmd)
    os.chdir(curdir)
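# The snippets in this section all delegate process execution to a
# common.run_command helper whose implementation is not shown. The following
# is only a minimal sketch, assuming a subprocess-based helper that raises on
# a non-zero exit status; the name and behavior are illustrative, not the
# actual common module.
import subprocess

def run_command_sketch(command, env=None, cwd=None):
    """Hypothetical stand-in for common.run_command: run a command list and
    fail loudly if it exits non-zero."""
    print("Executing: {}".format(" ".join(command)))
    subprocess.run(command, env=env, cwd=cwd, check=True)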
def build_libfabric(libfab_install_path, mode):
    if not os.path.exists(libfab_install_path):
        os.makedirs(libfab_install_path)

    config_cmd = ['./configure', f'--prefix={libfab_install_path}']
    enable_prov_val = 'yes'

    if mode == 'dbg':
        config_cmd.append('--enable-debug')
    elif mode == 'dl':
        enable_prov_val = 'dl'

    for prov in common.enabled_prov_list:
        config_cmd.append(f'--enable-{prov}={enable_prov_val}')
    for prov in common.disabled_prov_list:
        config_cmd.append(f'--enable-{prov}=no')

    config_cmd.append('--disable-opx')  # we do not test opx in intel jenkins ci
    config_cmd.append('--disable-efa')  # we do not test efa in intel jenkins ci
    config_cmd.append('--enable-ze-dlopen')

    common.run_command(['./autogen.sh'])
    common.run_command(shlex.split(" ".join(config_cmd)))
    common.run_command(['make', 'clean'])
    common.run_command(['make', '-j32'])
    common.run_command(['make', 'install'])
def upload_a_file(self, package, version, file_path):
    """
    Upload a debian file to bintray.
    """
    cmd_args = [self._push_executable]
    cmd_args += ["--user", self._username]
    cmd_args += ["--api_key", self._api_key]
    cmd_args += ["--subject", self._subject]
    cmd_args += ["--repo", self._repo]
    cmd_args += ["--package", package]
    cmd_args += ["--version", version]
    cmd_args += ["--file_path", file_path]
    if self._component:
        cmd_args += ["--component", self._component]
    if self._distribution:
        cmd_args += ["--distribution", self._distribution]
    if self._architecture:
        cmd_args += ["--architecture", self._architecture]
    cmd_args += ["--package", package]
    cmd_args += ["--version", version]
    cmd_args += ["--file_path", file_path]
    try:
        common.run_command(cmd_args)
    except Exception, ex:
        raise RuntimeError("Failed to upload file {0} due to {1}".format(
            file_path, ex))
def main():
    args = parse_args()
    print("Building hermes using {} into {}".format(
        args.build_system, args.hermes_build_dir + os.path.sep))
    try:
        os.mkdir(args.hermes_build_dir)
    except OSError:
        # It's alright if the file already exists.
        pass
    cmake_flags = args.cmake_flags.split() + [
        "-DLLVM_BUILD_DIR=" + args.llvm_build_dir,
        "-DLLVM_SRC_DIR=" + args.llvm_src_dir,
        "-DCMAKE_BUILD_TYPE=" + args.build_type,
    ]
    if args.is_32_bit:
        cmake_flags += ["-DLLVM_BUILD_32_BITS=On"]
    if (platform.system() == "Windows"
            and platform.machine().endswith("64")
            and is_visual_studio(args.build_system)):
        cmake_flags += ["-Thost=x64"]
    if not args.distribute:
        cmake_flags += ["-DLLVM_ENABLE_ASSERTIONS=On"]
    if args.enable_asan:
        cmake_flags += ["-DLLVM_USE_SANITIZER=Address"]
    if args.opcode_stats:
        cmake_flags += ["-DHERMESVM_PROFILER_OPCODE=On"]
    if args.basic_block_profiler:
        cmake_flags += ["-DHERMESVM_PROFILER_BB=On"]
    if args.warnings_as_errors:
        cmake_flags += ["-DHERMES_ENABLE_WERROR=On"]
    if args.static_link:
        cmake_flags += ["-DHERMES_STATIC_LINK=On"]
    if args.fbsource_dir:
        cmake_flags += ["-DFBSOURCE_DIR=" + args.fbsource_dir]
    if args.icu_root:
        cmake_flags += ["-DICU_ROOT=" + args.icu_root]
    elif (os.environ.get("SANDCASTLE")
            and platform.system() != "macos"
            and platform.system() != "Windows"):
        raise Exception("No ICU path provided on sandcastle")

    print("CMake flags: {}".format(" ".join(cmake_flags)))

    hermes_src_dir = os.path.realpath(__file__)
    # The hermes directory is three directories up from this file.
    # If this file is moved, make sure to update this.
    for _ in range(3):
        hermes_src_dir = os.path.dirname(hermes_src_dir)

    cmake = which("cmake")
    # Print the CMake version to assist in diagnosing issues.
    print("CMake version:\n{}".format(
        subprocess.check_output([cmake, "--version"],
                                stderr=subprocess.STDOUT)))

    run_command(
        [cmake, hermes_src_dir, "-G", args.build_system] + cmake_flags,
        env=os.environ,
        cwd=args.hermes_build_dir,
    )
def build_libfabric(libfab_install_path, mode):
    if not os.path.exists(libfab_install_path):
        os.makedirs(libfab_install_path)

    config_cmd = ['./configure', '--prefix={}'.format(libfab_install_path)]
    enable_prov_val = 'yes'

    if mode == 'dbg':
        config_cmd.append('--enable-debug')
    elif mode == 'dl':
        enable_prov_val = 'dl'

    for prov in common.enabled_prov_list:
        config_cmd.append('--enable-{}={}'.format(prov, enable_prov_val))
    for prov in common.disabled_prov_list:
        config_cmd.append('--enable-{}=no'.format(prov))

    config_cmd.append('--with-psm2-src={}/opa-psm2-lib'.format(workspace))

    common.run_command(['./autogen.sh'])
    common.run_command(shlex.split(" ".join(config_cmd)))
    common.run_command(['make', 'clean'])
    common.run_command(['make'])
    common.run_command(['make', 'install'])
def edit_group(self, groupname, group):
    res = common.run_command(
        ['groupmod', '-g', str(group.gid), '-n', group.name, groupname])
    for user in group.members.values():
        common.run_command(['usermod', '-a', '-G', group.name, user.name])
    return res
def execute_cmd(self):
    assert self.osu_mpi_path
    p = re.compile('osu_put*')
    for root, dirs, tests in os.walk(self.osu_mpi_path):
        for test in tests:
            # if test in self.disable:
            #     continue
            if test in self.two_proc_tests:
                self.n = 2
                self.ppn = 1
            else:
                self.n = 4
                self.ppn = 2

            if test == 'osu_latency_mp' and self.core_prov == 'verbs':
                self.env.append(('IBV_FORK_SAFE', '1'))

            if p.search(test) is None:
                launcher = self.cmd + self.options
                osu_cmd = os.path.join(root, test)
                command = launcher + osu_cmd
                outputcmd = shlex.split(command)
                common.run_command(outputcmd)

            if test == 'osu_latency_mp' and self.core_prov == 'verbs':
                self.env.remove(('IBV_FORK_SAFE', '1'))
def main(): """The main method.""" parser = argparse.ArgumentParser(description='A CLI to commit to git.') parser.add_argument('message', nargs='+', help='the commit message') args = parser.parse_args() commit_msg = ' '.join(args.message) run_command(f'git commit -m "{commit_msg}"')
def execute_cmd(self):
    command = self.cmd + self.options
    if self.mpi1.execute_condn:
        outputcmd = shlex.split(command + self.mpi1.imb_cmd)
        common.run_command(outputcmd)
    if self.rma.execute_condn:
        outputcmd = shlex.split(command + self.rma.imb_cmd)
        common.run_command(outputcmd)
def download_app(app_platform, engine_commit_formatted_hash):
    gclpath, localpath = get_gcs_and_local_path(app_platform,
                                                engine_commit_formatted_hash)
    if os.path.exists(localpath):
        os.remove(localpath)
    args = ['cp', gclpath, localpath]
    common.run_command('gsutil', ' '.join(args))
    return localpath
def execute_cmd(self):
    command = self.cmd + self.options
    for test_type in self.imb_tests[self.test_group]:
        self.test_obj = IMBtests(test_type, self.core_prov, self.util_prov)
        if self.test_obj.execute_condn:
            outputcmd = shlex.split(command + self.test_obj.imb_cmd)
            common.run_command(outputcmd)
        else:
            print("IMB-{} not run".format(test_type))
def do_pre_test_run_tasks(self):
    # Generate WPT metadata files.
    common.run_command([
        sys.executable,
        os.path.join(wpt_common.BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
        "--metadata-output-dir", WPT_METADATA_OUTPUT_DIR,
        "--additional-expectations", WPT_OVERRIDE_EXPECTATIONS_PATH,
        "--checked-in-metadata-dir", WPT_CHECKED_IN_METADATA_DIR
    ])
def clone_and_patch_llvm(args):
    git = build_git_command(args.http_proxy)
    if not os.path.exists(args.llvm_src_dir):
        # If the directory doesn't exist, clone LLVM there.
        print("Cloning LLVM into {}".format(args.llvm_src_dir))
        run_command(
            git + [
                "clone",
                "--shallow-since", _LLVM_REV_DATE,
                "https://github.com/llvm-mirror/llvm.git",
                args.llvm_src_dir,
            ],
            retries=3,
        )

    # Checkout a specific revision in LLVM.
    run_command(git + ["checkout", _LLVM_REV], cwd=args.llvm_src_dir)

    # Check that the repository is clean.
    try:
        run_command(git + ["diff-index", "--quiet", "HEAD"],
                    cwd=args.llvm_src_dir)
    except subprocess.CalledProcessError:
        raise Exception("llvm dir is dirty (contains uncommitted changes)")

    # Apply small edits to LLVM from patch files.
    run_command(
        git + [
            "apply",
            "--ignore-space-change",
            "--ignore-whitespace",
            os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "llvm-changes-for-hermes.patch",
            ),
        ],
        cwd=args.llvm_src_dir,
    )

    # Commit the patch.
    run_command(
        git + [
            "-c", "user.name=nobody",
            "-c", "user.email='*****@*****.**'",
            "commit", "-a", "-m", "Patch by Hermes build script",
        ],
        cwd=args.llvm_src_dir,
    )
def build_uh(shmem_dir):
    oshcc_bin = "{}/bin".format(shmem_dir)
    os.environ["PATH"] += os.pathsep + oshcc_bin
    tmp_uh_src = '{}/tests-uh'.format(ci_site_config.shmem_root)
    shutil.copytree(tmp_uh_src, '{}/tests-uh'.format(shmem_dir))
    # os.chdir(shmem_dir)
    # git_cmd = ['git', 'clone', '--depth', '1',
    #            'https://github.com/openshmem-org/tests-uh.git', 'tests-uh']
    # common.run_command(git_cmd)
    os.chdir('{}/tests-uh'.format(shmem_dir))
    common.run_command(['make', '-j4', 'C_feature_tests'])
def clean_up_after_test_run(self):
    common.run_command([
        sys.executable,
        os.path.join(BLINK_TOOLS_DIR, 'update_wpt_output.py'),
        '--verbose',
        '--old-json-output-file-path',
        self.options.old_json_output_file_path,
        '--new-json-output-dir', self.options.new_json_output_dir,
        '--new-json-output-filename', self.options.new_json_output_filename,
        '--additional-expectations', WPT_OVERRIDE_EXPECTATIONS_PATH
    ])
def clean_up_after_test_run(self):
    common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'third_party', 'blink', 'tools',
                     'update_wpt_output.py'),
        '--old-json-output-file-path',
        self.options.old_json_output_file_path,
        '--new-json-output-dir', self.options.new_json_output_dir,
        '--new-json-output-filename', self.options.new_json_output_filename,
    ])
def run(self):
    common.print_verbose("Running " + self.name + " action")
    files_to_copy = glob.glob(
        MetaModel(common.meta_model()).template_for_action(self.name) + "/*",
        recursive=False)
    command_to_run = ['/bin/cp', "-R", *files_to_copy, '.']
    common.run_command(command_to_run)
    common.print_raw("Initialized new Java 9 application.")
    return 0, ""
def add_group(self, group):
    res = common.run_command(
        ['groupadd', '-g', str(group.gid), group.name])
    for user in group.members.values():
        if user in self.users.values():
            common.run_command(
                ['usermod', '-a', '-G', group.name, user.name])
        else:
            self.add_user(user)
    return res
def main_run(args):
    annotations_file = tempfile.NamedTemporaryFile()
    annotations_filename = annotations_file.name
    annotations_file.close()

    command_line = [
        sys.executable,
        os.path.join(common.SRC_DIR, 'tools', 'traffic_annotation', 'scripts',
                     'traffic_annotation_auditor_tests.py'),
        '--build-path',
        os.path.join(args.paths['checkout'], 'out', args.build_config_fs),
        '--annotations-file', annotations_filename,
    ]
    rc = common.run_command(command_line)

    # Update the Google Sheets on success, but only on the Windows trybot.
    if rc == 0 and is_windows():
        print("Tests succeeded. Updating annotations sheet...")

        config_file = tempfile.NamedTemporaryFile(delete=False)
        json.dump(SHEET_CONFIG, config_file, indent=4)
        config_filename = config_file.name
        config_file.close()

        command_line = [
            'vpython.bat',
            os.path.join(common.SRC_DIR, 'tools', 'traffic_annotation',
                         'scripts', 'update_annotations_sheet.py'),
            '--force',
            '--config-file', config_filename,
            '--annotations-file', annotations_filename,
        ]
        rc = common.run_command(command_line)

        try:
            os.remove(config_filename)
        except OSError:
            pass

    try:
        os.remove(annotations_filename)
    except OSError:
        pass

    json.dump({
        'valid': True,
        'failures': ['Please refer to stdout for errors.'] if rc else [],
    }, args.output)

    return rc
def skip(install_path):
    if os.getenv('CHANGE_TARGET') is not None:
        change_target = os.environ['CHANGE_TARGET']
    else:
        change_target = 'main'

    command = [
        '{}/skip.sh'.format(ci_site_config.testpath),
        '{}'.format(os.environ['WORKSPACE']),
        '{}'.format(change_target)
    ]
    common.run_command(command)
def main():
    # First, generate WPT metadata files.
    common.run_command([
        sys.executable,
        os.path.join(BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
        "--metadata-output-dir", WPT_METADATA_DIR,
        "--additional-expectations", WPT_OVERRIDE_EXPECTATIONS_PATH
    ])
    adapter = WPTTestAdapter()
    return adapter.run_test()
def build_ISx(shmem_dir):
    oshcc = '{}/bin/oshcc'.format(shmem_dir)
    tmp_isx_src = '{}/ISx'.format(ci_site_config.shmem_root)
    shutil.copytree(tmp_isx_src, '{}/ISx'.format(shmem_dir))
    # os.chdir(shmem_dir)
    # git_cmd = ['git', 'clone', '--depth', '1',
    #            'https://github.com/ParRes/ISx.git', 'ISx']
    # common.run_command(git_cmd)
    os.chdir('{}/ISx/SHMEM'.format(shmem_dir))
    common.run_command(['make', 'CC={}'.format(oshcc), 'LDLIBS=-lm'])
def _generate_cut_jar(self, jar_path, config, out_dir):
    '''Invoke JarCutter tool to customize jar file'''
    # Ensure the out dir is empty
    common.recreate_folder(out_dir)
    # Run the JarCutter
    common.run_command(
        "python " + os.path.join(self.script_path, "../jar-cutter/JarCutter.py") +
        " -c " + config +
        " -s " + jar_path +
        " -o " + out_dir,
        error_message="cutting jar file (%s) failed!" % jar_path)
def publish_package(self, package_dir, tag=None):
    '''
    publish package to npm registry with tag
    '''
    try:
        cmd_args = ["npm", "publish"]
        if tag is not None:
            cmd_args += ["--tag", tag]
        common.run_command(cmd_args, directory=package_dir)
    except Exception, e:
        raise ValueError("Failed to publish package {package} due to {error}"
                         .format(package=package_dir, error=e))
def update_version(package_dir, version=None):
    '''
    update version of package
    '''
    try:
        cmd_args = ["npm", "version", "--no-git-tag-version"]
        if version is not None:
            cmd_args.append(version)
        common.run_command(cmd_args, directory=package_dir)
    except Exception, e:
        raise ValueError("Failed to update version of package {package} due to {error}"
                         .format(package=package_dir, error=e))
def build_uh(shmem_dir):
    oshcc_bin = "{}/bin".format(shmem_dir)
    os.environ["PATH"] += os.pathsep + oshcc_bin
    os.chdir(shmem_dir)
    git_cmd = [
        'git', 'clone', '--depth', '1',
        'https://github.com/openshmem-org/tests-uh.git', 'tests-uh'
    ]
    common.run_command(git_cmd)
    os.chdir('tests-uh')
    common.run_command(['make', '-j4', 'C_feature_tests'])
def _generate_cocosruntime_jar_file(self):
    common.ensure_folder_exists(self.debug_directory)
    common.run_command(
        "python " + os.path.join(self.script_path, "../jar-maker/JarMaker.py") +
        " -s " + self.proj_gplayenginebridge_src_dir +
        " -s " + self.proj_cocosruntime_src_dir +
        " -o " + self.out_dir +
        " -f " + os.path.split(self.sdk_jar_no_dex_obfuscated_path)[-1] +
        " --ref-lib " + os.path.join(self.script_path, "..", "common", "lib", "android.jar") +
        " --ref-lib " + os.path.join(self.script_path, "..", "common", "lib", "annotations.jar"))
def main(): """The main function.""" parser = argparse.ArgumentParser( description='A CLI to clone a repo, cd in, then list the contents.') parser.add_argument('url', help='the url of the repo') RpclManager(parser) args = parser.parse_args() url = args.url run_command(f'git clone {url}') path = url.split('/')[-1][:-4] print(color_info('\nClone complete, moving into directory.\n')) exit(path)
def unfreeze(self, storage):
    """Unfreezes the disk mounted to a mount point."""
    if storage.file_system_type not in Freezer.UNFREEZE_COMMANDS:
        _log("Does not know how to unfreeze file system type {}".format(
            storage.file_system_type))
        _log("Continuing without unfreezing {}".format(storage.mount_point))
        return
    _log("Unfreezing {}".format(storage.mount_point))
    unfreeze_command = Freezer.UNFREEZE_COMMANDS[storage.file_system_type]
    common.run_command(unfreeze_command.replace("_REPLACED_WITH_MOUNT_POINT",
                                                storage.mount_point))
    self.frozen.remove(storage)
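# Freezer.UNFREEZE_COMMANDS is referenced above but not defined in this
# snippet. A plausible sketch, assuming it maps a file system type to a shell
# command template containing the _REPLACED_WITH_MOUNT_POINT placeholder that
# unfreeze() substitutes with the real mount point (the xfs entry below is an
# assumption, not the original table):
UNFREEZE_COMMANDS = {
    "xfs": "sudo xfs_freeze -u _REPLACED_WITH_MOUNT_POINT",
}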
def build_ISx(shmem_dir):
    oshcc = '{}/bin/oshcc'.format(shmem_dir)
    os.chdir(shmem_dir)
    git_cmd = [
        'git', 'clone', '--depth', '1',
        'https://github.com/ParRes/ISx.git', 'ISx'
    ]
    common.run_command(git_cmd)
    os.chdir('ISx/SHMEM')
    common.run_command(['make', 'CC={}'.format(oshcc), 'LDLIBS=-lm'])
def execute_cmd(self, oneccl_test):
    if oneccl_test == 'examples':
        for test in self.examples_tests:
            command = self.cmd + self.options(oneccl_test) + \
                      " {}".format(test)
            outputcmd = shlex.split(command)
            common.run_command(outputcmd)
    elif oneccl_test == 'functional':
        for test in self.functional_tests:
            command = self.cmd + self.options(oneccl_test) + \
                      " {}".format(test)
            outputcmd = shlex.split(command)
            common.run_command(outputcmd)
def execute_cmd(self, oneccl_test_gpu):
    if oneccl_test_gpu == 'examples':
        for test in self.examples_tests:
            command = self.cmd + self.options(oneccl_test_gpu) + \
                      f" {test}"
            outputcmd = shlex.split(command)
            common.run_command(outputcmd)
    elif oneccl_test_gpu == 'functional':
        for test in self.functional_tests:
            command = self.cmd + self.options(oneccl_test_gpu) + \
                      f" {test}"
            outputcmd = shlex.split(command)
            common.run_command(outputcmd)
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    script_args = args.args
    test_suite = script_args[0]

    with common.temporary_file() as tempfile_path:
        cmd = [
            os.path.join(args.paths['checkout'], 'build', 'android',
                         'test_runner.py'),
            'gtest',
            '--release' if 'release' in args.build_config_fs.lower()
            else '--debug',
            '--suite', test_suite,
            '--verbose',
            '--flakiness-dashboard-server=http://test-results.appspot.com',
            '--json-results-file', tempfile_path,
        ]
        if filter_tests:
            cmd.extend(['--gtest-filter', ':'.join(filter_tests)])
        rc = common.run_command(cmd)

        with open(tempfile_path) as f:
            results = json.load(f)

    parsed_results = common.parse_gtest_test_results(results)

    json.dump({
        'valid': True,
        'failures': parsed_results['failures'],
    }, args.output)

    return rc
def get_current_version(self):
    """
    return the current version
    """
    cmd_args = ["dpkg-parsechangelog", "--show-field", "Version"]
    version = common.run_command(cmd_args, directory=self._repo_dir)
    return version
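# Example of what the helper above returns, assuming common.run_command
# captures stdout: for a debian/changelog whose newest entry is
# "mypackage (1.2.3-4) unstable; ...", `dpkg-parsechangelog --show-field
# Version` yields the string "1.2.3-4". (The package name and version here
# are illustrative.)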
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--isolated-script-test-output', type=str, required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(
            env=env, build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, \
            'Failed to start xvfb'

    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]

    try:
        valid = True
        rc = 0
        try:
            rc = common.run_command(
                [sys.executable] + rest_args + sharding_args + [
                    '--write-abbreviated-json-results-to',
                    args.isolated_script_test_output,
                ], env=env)
            valid = bool(rc == 0)
        except Exception:
            traceback.print_exc()
            valid = False

        if not valid:
            failures = ['(entire test suite)']
            with open(args.isolated_script_test_output, 'w') as fp:
                json.dump({
                    'valid': valid,
                    'failures': failures,
                }, fp)

        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
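# Worked example of the gtest-sharding compatibility block above: with
# GTEST_TOTAL_SHARDS=4 and GTEST_SHARD_INDEX=1 in the environment, both
# variables are removed from env and the wrapped command receives
# ['--total-shards=4', '--shard-index=1'] appended after rest_args.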
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--output", required=True)
    parser.add_argument("args", nargs=argparse.REMAINDER)
    args = parser.parse_args(argv)

    passthrough_args = args.args
    if passthrough_args[0] == "--":
        passthrough_args = passthrough_args[1:]

    results = {}

    for filename in os.listdir(common.SCRIPT_DIR):
        if not filename.endswith(".py"):
            continue
        if filename in ("common.py", "get_compile_targets.py"):
            continue

        with common.temporary_file() as tempfile_path:
            rc = common.run_command(
                [sys.executable, os.path.join(common.SCRIPT_DIR, filename)]
                + passthrough_args
                + ["compile_targets", "--output", tempfile_path]
            )
            if rc != 0:
                return rc
            with open(tempfile_path) as f:
                results[filename] = json.load(f)

    with open(args.output, "w") as f:
        json.dump(results, f)

    return 0
def generate_big_version(self):
    """
    Generate the big version according to changelog
    The big version is the latest version of debian/changelog
    return: big version
    """
    # If the repository has debianstatic/<repository name>/, create a soft
    # link to debian before computing the version.
    debian_exist = self.debian_exist()
    linked = False
    if not debian_exist:
        for filename in os.listdir(self._repo_dir):
            if filename == "debianstatic":
                debianstatic_dir = os.path.join(self._repo_dir, "debianstatic")
                for debianstatic_filename in os.listdir(debianstatic_dir):
                    if debianstatic_filename == self._repo_name:
                        debianstatic_repo_dir = "debianstatic/{0}".format(self._repo_name)
                        common.link_dir(debianstatic_repo_dir, "debian", self._repo_dir)
                        linked = True

    if not debian_exist and not linked:
        return None

    cmd_args = ["dpkg-parsechangelog", "--show-field", "Version"]
    version = common.run_command(cmd_args, directory=self._repo_dir)

    if linked:
        os.remove(os.path.join(self._repo_dir, "debian"))

    return version
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--isolated-script-test-output",
                        type=argparse.FileType("w"), required=True)
    parser.add_argument("--xvfb", help="Start xvfb.", action="store_true")
    args, rest_args = parser.parse_known_args()

    xvfb_proc = None
    openbox_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc = xvfb.start_xvfb(env=env, build_dir=".")
        assert xvfb_proc and openbox_proc, "Failed to start xvfb"

    try:
        with common.temporary_file() as tempfile_path:
            rc = common.run_command(
                [sys.executable] + rest_args +
                ["--write-full-results-to", tempfile_path],
                env=env)
            with open(tempfile_path) as f:
                results = json.load(f)
            parsed_results = common.parse_common_test_results(
                results, test_separator=".")
            failures = parsed_results["unexpected_failures"]

            json.dump(
                {
                    "valid": bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                                  ((rc == 0) or failures)),
                    "failures": failures.keys(),
                },
                args.isolated_script_test_output,
            )

        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
def run(self):
    common.print_verbose("Running " + self.name + " action")
    exit_code = 0
    for test_dir in glob.iglob('**/test', recursive=True):
        original_working_directory = os.getcwd()
        run_directory = os.path.join(original_working_directory, str(test_dir))
        common.print_info("Running tests in " + str(run_directory))
        common.print_verbose("Changing directory to " + str(run_directory))
        os.chdir(run_directory)

        tests = []
        for filename in glob.iglob('**/*.py', recursive=True):
            tests.append(filename)

        command = ['/usr/local/bin/python3', '-m', 'unittest']
        command.extend(tests)
        subprocess_exit_code, output = common.run_command(command)
        if subprocess_exit_code != common.SUCCESS:
            exit_code = common.FAILED
        common.print_verbose(output)
        common.continue_if_failed(subprocess_exit_code, output)

        common.print_verbose("Changing directory to " +
                             str(original_working_directory))
        os.chdir(original_working_directory)
    return exit_code, ""
def get_device_info(args, failures):
    """Parses the device info for each attached device, and returns a summary
    of the device info and any mismatches.

    Returns:
        A dict indicating the result.
    """
    if not is_linux():
        return {}

    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(args.paths['checkout'], 'third_party', 'catapult',
                         'devil', 'devil', 'android', 'tools',
                         'device_status.py'),
            '--json-output', tempfile_path,
            '--blacklist-file',
            os.path.join(args.paths['checkout'], 'out', 'bad_devices.json')
        ])
        if rc:
            failures.append('device_status')
            return {}

        with open(tempfile_path, 'r') as src:
            device_info = json.load(src)

    results = {}
    results['devices'] = sorted(v['serial'] for v in device_info)

    details = [
        v['ro.build.fingerprint'] for v in device_info if not v['blacklisted']]

    def unique_build_details(index):
        return sorted(list(set([v.split(':')[index] for v in details])))

    parsed_details = {
        'device_names': unique_build_details(0),
        'build_versions': unique_build_details(1),
        'build_types': unique_build_details(2),
    }
    for k, v in parsed_details.iteritems():
        if len(v) == 1:
            results[k] = v[0]
        else:
            results[k] = 'MISMATCH'
            results['%s_list' % k] = v
            failures.append(k)

    for v in device_info:
        if v['blacklisted']:
            failures.append('Device %s blacklisted' % v['serial'])

    return results
def _get_mounted_storages():
    """Returns a dictionary of Storage objects, indexed by their mount_point.
    The Storage objects are found using the fstab."""
    fstab_info = common.run_command("cat {}".format(_FSTAB_PATH)).split("\n")
    storages = {}
    for line in fstab_info:
        trimmed_line = line.strip()
        if not trimmed_line or trimmed_line[0] == "#":
            continue
        if trimmed_line.find("LABEL=") == 0:
            # The first line is typically the table header.
            continue

        storage = Storage()
        matched_fs_info = re.search(r"^\s*(\S+)\s+(\S+)\s+(\S+)", trimmed_line)
        if matched_fs_info:
            storage.primary_device_name = matched_fs_info.group(1)
            storage.mount_point = matched_fs_info.group(2)
            storage.file_system_type = matched_fs_info.group(3)

        # Now check if this is a RAID array
        mdadms = common.run_command("sudo mdadm --detail --scan")
        if "ARRAY {}".format(storage.primary_device_name) in mdadms:
            storage.is_raid = True
            # Now find a full listing of ALL the devices used
            raid_device_info = common.run_command(
                "sudo mdadm --detail {}".format(
                    storage.primary_device_name)).split("\n")
            for line in raid_device_info:
                # We are pulling the devices off of the lines that look like this:
                #    0     202       97        0      active sync   /dev/sdg1
                matched_raid_device_info = re.search(
                    r"\s*\d+?\s+\d+?\s+\d+\s+\d+.*?(/dev/.+)", line)
                if matched_raid_device_info:
                    storage.devices.append(
                        matched_raid_device_info.group(1).strip())
        else:
            storage.is_raid = False
            storage.devices.append(storage.primary_device_name)

        storages[storage.mount_point] = storage
    # End for loop

    if _DEBUG:
        for path, storage in storages.iteritems():
            if path and path != "none":
                _log(storage)

    return storages
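# Example of the fstab parsing above (an illustrative entry, not from a real
# host): given the line
#
#   /dev/xvdb  /data  ext4  defaults,nofail  0  2
#
# the regex captures primary_device_name='/dev/xvdb', mount_point='/data'
# and file_system_type='ext4'; mdadm is then queried to decide whether
# /dev/xvdb is actually a RAID array.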
def authenticate(self):
    '''
    Authenticate the npm registry with token
    '''
    try:
        home = os.path.expanduser("~")
        user_config = os.path.join(home, ".npmrc")
        f = open(user_config, 'w+')
        text = "//{registry}/:_authToken={token}" \
               .format(registry=self._registry, token=self._token)
        f.write(text)
        f.close()
        cmd_args = ['npm', 'whoami']
        common.run_command(cmd_args)
    except Exception, e:
        raise ValueError("Failed to authenticate with {registry} due to {error}"
                         .format(registry=self._registry, error=e))
def run():
    parser = c.prepare_optparser()
    parser.add_option("-l", "--list", action="store_true",
                      help="Print list of all tests")
    t = TestHarness(all_tests(), parser)
    options, args = parser.parse_args()

    if options.list:
        for x in t.test_suites:
            print x.modname
            for i in x:
                print ' - ', i
            print
        return

    c.process_options(options)
    c.run_command(lambda: t.run(options, args), options)
    if t.import_errors:
        sys.exit('there were import errors!\n')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--isolated-script-test-output', type=argparse.FileType('w'),
        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(
            env=env, build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, \
            'Failed to start xvfb'

    try:
        tempfile_dir = tempfile.mkdtemp('telemetry')
        valid = True
        failures = []
        try:
            rc = common.run_command([sys.executable] + rest_args + [
                '--output-dir', tempfile_dir,
                '--output-format=json'
            ], env=env)
            tempfile_name = os.path.join(tempfile_dir, 'results.json')
            with open(tempfile_name) as f:
                results = json.load(f)
            for value in results['per_page_values']:
                if value['type'] == 'failure':
                    failures.append(
                        results['pages'][str(value['page_id'])]['name'])
            valid = bool(rc == 0 or failures)
        except Exception:
            traceback.print_exc()
            valid = False
        finally:
            shutil.rmtree(tempfile_dir)

        if not valid and not failures:
            failures = ['(entire test suite)']
            if rc == 0:
                rc = 1  # Signal an abnormal exit.

        json.dump({
            'valid': valid,
            'failures': failures,
        }, args.isolated_script_test_output)
        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
def check_for_version_conflicts():
    global dependency_versions

    (exit_code, output) = common.run_command(
        "cut -d, -f1-2 " + common.dependency_versions_csv_path +
        " | sort | uniq -d")
    dups_found = False
    if output != "":
        dups_found = True

    version_conflict_found = False
    (exit_code, output) = common.run_command(
        "sort " + common.dependency_versions_csv_path +
        " | uniq | cut -d, -f1-2 | uniq -d")
    if output != "":
        version_conflict_found = True

    if version_conflict_found:
        common.print_error("Duplicate dependency with different version numbers definitions found")
        return 1
    elif dups_found:
        common.print_warning("Duplicate dependency definitions found")

    return 0
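# Illustrative CSV contents for the two checks above, assuming rows of the
# form group,artifact,version (the exact column layout is an assumption):
#
#   com.fasterxml.jackson.core,jackson-core,2.9.8
#   com.fasterxml.jackson.core,jackson-core,2.9.8    <- exact duplicate: warning only
#   com.google.guava,guava,28.0-jre
#   com.google.guava,guava,27.1-jre                  <- same dependency, two versions: error
#
# The first pipeline flags any dependency listed more than once; the second
# de-duplicates identical rows first, so only entries that differ in version
# survive and are reported as conflicts.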
def update_changelog(self, message=None):
    """
    add an entry to changelog
    :param message: the message which is going to be added to changelog
    return: True if changelog is updated
            False, otherwise
    """
    repo_name = self.get_repo_name()
    debian_exist = self.debian_exist()
    linked = False
    if not debian_exist:
        linked = self.link_debianstatic()
    if not debian_exist and not linked:
        return

    # -v: Add a new changelog entry with version number specified
    # -b: Force a version to be less than the current one
    # -m: Don't change (maintain) the trailer line in the changelog entry; i.e.
    #     maintain the maintainer and date/time details
    try:
        version = self.get_current_version()
        # TBD: check whether the commit in manifest is the commit to jump version
        if version == self._version:
            print "[WARNING] The version of {0} is already {1}, skip the version bump action in debian/changelog for {0}"\
                  .format(repo_name, self._version)
            return

        print "start to update changelog of {0}".format(self._repo_dir)
        cmd_args = ["dch", "-v", self._version, "-b", "-m"]
        if message is None:
            message = "new release {0}".format(self._version)
        cmd_args += ["-p", message]
        common.run_command(cmd_args, directory=self._repo_dir)

        if linked:
            os.remove(os.path.join(self._repo_dir, "debian"))
        return
    except Exception, err:
        raise RuntimeError(
            "Failed to add an entry for {0} in debian/changelog due to {1}".format(
                self._version, err))
def test_classes_for(domain, scope):
    test_classes_dir = dependencies.target_dir_for(domain, scope)
    (exit_code, output) = common.run_command(
        "find " + test_classes_dir + " -type f -name *Test.class")
    test_classes = output.split("\n")

    test_classes_as_string = ""
    for test_class in test_classes:
        without_target_dir = test_class.replace(test_classes_dir, "")
        without_class_extn = without_target_dir.replace(".class", "")
        without_leading_slash = without_class_extn.strip("/")
        with_dots_not_slashes = without_leading_slash.replace("/", ".")
        test_classes_as_string = test_classes_as_string + " " + with_dots_not_slashes
    return test_classes_as_string
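# Worked example of the path-to-class-name conversion above: a find result of
#   <test_classes_dir>/com/example/widgets/ButtonTest.class
# is stripped of the target directory and the .class suffix, the leading
# slash is trimmed, and slashes become dots, yielding
# "com.example.widgets.ButtonTest" for JUnitCore. (The package and class
# names are made up for illustration.)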
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--isolated-script-test-output', type=argparse.FileType('w'),
        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(
            env=env, build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, \
            'Failed to start xvfb'

    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]

    try:
        with common.temporary_file() as tempfile_path:
            rc = common.run_command(
                [sys.executable] + rest_args + sharding_args + [
                    '--write-full-results-to', tempfile_path,
                ], env=env)
            with open(tempfile_path) as f:
                results = json.load(f)
            parsed_results = common.parse_common_test_results(
                results, test_separator='.')
            failures = parsed_results['unexpected_failures']

            json.dump({
                'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                              ((rc == 0) or failures)),
                'failures': failures.keys(),
            }, args.isolated_script_test_output)

        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
def compile(source_dir, target_dir, classpath):
    cp_string = ""
    if classpath != "":
        cp_string = " -cp " + classpath + " "
    common.run_command('rm -rf ' + target_dir)
    common.run_command('mkdir -p ' + target_dir)
    common.run_command('find ' + source_dir +
                       ' -type f -name "*.java" -print | xargs javac ' +
                       cp_string + ' -d ' + target_dir +
                       ' -sourcepath ' + source_dir)
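# For illustration, with source_dir='src', target_dir='build/classes' and
# classpath='lib/junit.jar' (hypothetical paths), the last run_command above
# expands to roughly:
#
#   find src -type f -name "*.java" -print | xargs javac  -cp lib/junit.jar  -d build/classes -sourcepath src
#
# Note the command relies on the shell to run the pipeline.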
def main_run(args):
    rc = common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'tools', 'gn', 'bin',
                     'gyp_flag_compare.py')
    ] + args.args)
    # TODO(dpranke): Figure out how to get a list of failures out of
    # gyp_flag_compare?
    json.dump({
        'valid': True,
        'failures': ['compare_failed'] if rc else [],
    }, args.output)

    return rc
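# The JSON written to args.output above follows the same two-key shape used
# by the other main_run wrappers in this section; for example, a non-zero
# return code would produce:
#
#   {"valid": true, "failures": ["compare_failed"]}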
def ccbase2git(baselines, view_dir, git_dir, guarddirs):
    """
    guarddirs - list of dirs where to guard empty subdirs
    (this will insert empty .gitignore file in any empty dir in guarddirs list)
    """
    # TODO: error handling.. :(
    for baseline in baselines:
        starttime = time()
        print "  START exporting baseline:", baseline
        run_command("cd " + view_dir + " ; sudo " + VCT_CMD + " rmprivate")
        run_command("cd " + view_dir + " ; " + VCT_CMD + " rebase " + baseline)
        for dir in guarddirs:
            noemptydirs(dir, ".gitignore")
        run_command("cd " + view_dir + " ; git --git-dir=" + git_dir +
                    " --work-tree=" + view_dir + " add -v -A .")
        run_command("cd " + view_dir + " ; git --git-dir=" + git_dir +
                    " --work-tree=" + view_dir +
                    " commit -v -m \"ccbase2git_v2 " + baseline + "\"")
        endtime = time()
        print "  END exporting baseline", baseline, "(time:", endtime - starttime, ")"
def run_tests_for(domain, scope):
    common.print_info_no_eol("Running " + scope + " for " + domain + "...")
    classpath = dependencies.classpath_for(domain, scope)
    test_classes_as_string = test_classes_for(domain, scope)
    if test_classes_as_string.strip() != "":
        run_tests_command = ("java -cp " + classpath +
                             " org.junit.runner.JUnitCore " +
                             test_classes_as_string)
        common.print_verbose("Running tests with:")
        common.print_verbose(run_tests_command)
        (exit_code, output) = common.run_command(run_tests_command)
        if exit_code == 0:
            common.print_info(" PASSED.")
        else:
            common.print_info(" FAILED.")
    else:
        common.print_info(" No tests found.")
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(common.SRC_DIR, "third_party", "WebKit", "Tools",
                         "Scripts", "lint-test-expectations"),
            "--json", tempfile_path,
        ])
        with open(tempfile_path) as f:
            failures = json.load(f)

    json.dump({"valid": True, "failures": failures}, args.output)

    return rc
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--isolated-script-test-output', type=str, required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()

    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if '--isolated-script-test-chartjson-output' in arg:
            rest_args.pop(index)
            break
        index += 1

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(
            env=env, build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, \
            'Failed to start xvfb'

    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]

    try:
        return common.run_command(
            [sys.executable] + rest_args + sharding_args +
            ['--write-full-results-to', args.isolated_script_test_output],
            env=env)
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            os.path.join(common.SRC_DIR, 'android_webview', 'tools',
                         'webview_licenses.py'),
            'scan',
            '--json', tempfile_path
        ])
        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({
        'valid': True,
        'failures': results,
    }, args.output)

    return rc
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str,
                        required=True)
    parser.add_argument('--xvfb', help='start xvfb', action='store_true')
    # This argument is ignored for now.
    parser.add_argument('--isolated-script-test-chartjson-output', type=str)
    args, rest_args = parser.parse_known_args()

    env = os.environ
    cmd = [sys.executable] + rest_args
    cmd += ['--write-full-results-to', args.isolated_script_test_output]
    if args.xvfb:
        return xvfb.run_executable(cmd, env)
    else:
        return common.run_command(cmd, env=env)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str,
                        required=True)
    args, rest_args = parser.parse_known_args()

    # Remove the chartjson extra arg until this script cares about chartjson
    # results
    index = 0
    for arg in rest_args:
        if '--isolated-script-test-chartjson-output' in arg:
            rest_args.pop(index)
            break
        index += 1

    ret = common.run_command([sys.executable] + rest_args)
    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({'valid': True, 'failures': ['failed'] if ret else []}, fp)
    return ret