def collect_tox_coverage_files(targeted_packages):
    root_coverage_dir = os.path.join(root_dir, "_coverage/")

    clean_coverage(coverage_dir)

    # coverage report has paths starting .tox and azure
    # coverage combine fixes this with the help of tox.ini[coverage:paths]
    combine_coverage_files(targeted_packages)

    coverage_files = []
    # generate coverage files
    for package_dir in targeted_packages:
        coverage_file = os.path.join(package_dir, ".coverage")
        if os.path.isfile(coverage_file):
            destination_file = os.path.join(
                root_coverage_dir, ".coverage_{}".format(os.path.basename(package_dir))
            )
            shutil.copyfile(coverage_file, destination_file)
            coverage_files.append(destination_file)

    logging.info("Visible uncombined .coverage files: {}".format(coverage_files))

    if len(coverage_files):
        cov_cmd_array = [sys.executable, "-m", "coverage", "combine"]
        cov_cmd_array.extend(coverage_files)

        # merge them with coverage combine and copy to root
        run_check_call(cov_cmd_array, os.path.join(root_dir, "_coverage/"))

        source = os.path.join(coverage_dir, "./.coverage")
        dest = os.path.join(root_dir, ".coverage")

        shutil.move(source, dest)

def _install_packages(self, dependent_pkg_path, pkg_to_exclude):
    python_executable = self.context.venv.python_executable
    working_dir = self.context.package_root_path
    temp_dir = self.context.temp_path

    list_to_exclude = [pkg_to_exclude]
    installed_pkgs = [
        p.split('==')[0]
        for p in list(freeze.freeze(paths=self.context.venv.lib_paths))
        if p.startswith('azure-')
    ]
    logging.info("Installed azure sdk packages: {}".format(installed_pkgs))

    # Do not exclude packages in the tools directory, so these tools packages are
    # reinstalled from the repo branch we are testing
    root_path = os.path.abspath(os.path.join(dependent_pkg_path, "..", "..", ".."))
    tools_packages = find_tools_packages(root_path)
    installed_pkgs = [req for req in installed_pkgs if req not in tools_packages]

    list_to_exclude.extend(installed_pkgs)
    # install dev requirements, skipping the package under test and any package that is already installed
    filtered_dev_req_path = filter_dev_requirements(
        dependent_pkg_path, list_to_exclude, dependent_pkg_path
    )

    if filtered_dev_req_path:
        logging.info(
            "Installing filtered dev requirements from {}".format(filtered_dev_req_path)
        )
        run_check_call(
            [python_executable, "-m", "pip", "install", "-r", filtered_dev_req_path],
            dependent_pkg_path,
        )
    else:
        logging.info("No dev requirements found to install")

    # install the dependent package that is being verified
    run_check_call(
        [python_executable, "-m", "pip", "install", dependent_pkg_path], temp_dir
    )

def _execute_test(self, dep_pkg_path):
    # install dependent package from source
    self._install_packages(dep_pkg_path, self.context.package_name)

    # Ensure correct version of package is installed
    if not self._is_package_installed(self.context.package_name, self.context.pkg_version):
        logging.error(
            "Incorrect version of package {0} is installed. Expected version {1}".format(
                self.context.package_name, self.context.pkg_version
            )
        )
        sys.exit(1)

    logging.info("Running test for {}".format(dep_pkg_path))
    commands = [
        self.context.venv.python_executable,
        "-m",
        "pytest",
        "--verbose",
        "--durations",
        "10",
    ]

    # add any pytest mark arg if present, e.g. 'not cosmosEmulator'
    if self.context.pytest_mark_arg:
        commands.extend(["-m", self.context.pytest_mark_arg])

    test_dir = self._get_package_test_dir(dep_pkg_path)
    if test_dir:
        commands.append(test_dir)
        run_check_call(commands, self.context.temp_path)
    else:
        logging.info(
            "Test directory not found in package root. Skipping {} from regression test.".format(
                self.context.package_name
            )
        )

def _install_packages(self, dependent_pkg_path, pkg_to_exclude):
    python_executable = self.context.venv.python_executable
    working_dir = self.context.package_root_path
    temp_dir = self.context.temp_path

    list_to_exclude = [pkg_to_exclude, 'azure-sdk-tools', 'azure-devtools']
    installed_pkgs = [
        p.split('==')[0]
        for p in get_installed_packages(self.context.venv.lib_paths)
        if p.startswith('azure-')
    ]
    logging.info("Installed azure sdk packages: {}".format(installed_pkgs))

    # Do not exclude packages in the tools directory, so these tools packages are
    # reinstalled from the repo branch we are testing
    root_path = os.path.abspath(os.path.join(dependent_pkg_path, "..", "..", ".."))
    tools_packages = find_tools_packages(root_path)
    installed_pkgs = [req for req in installed_pkgs if req not in tools_packages]

    list_to_exclude.extend(installed_pkgs)
    # install dev requirements, skipping the package under test and any package that is already installed
    filtered_dev_req_path = filter_dev_requirements(
        dependent_pkg_path, list_to_exclude, dependent_pkg_path
    )

    # early versions of azure-sdk-tools had an unpinned version of azure-mgmt packages.
    # that unpinned version hits a code path in azure-sdk-tools that triggers this error.
    if filtered_dev_req_path and self.context.is_latest_depend_test == False:
        logging.info("Extending dev requirements with {}".format(OLDEST_EXTENSION_PKGS))
        extend_dev_requirements(filtered_dev_req_path, OLDEST_EXTENSION_PKGS)
    else:
        logging.info(
            "Not extending dev requirements {} {}".format(
                filtered_dev_req_path, self.context.is_latest_depend_test
            )
        )

    if filtered_dev_req_path:
        logging.info("Extending dev requirements to include azure-sdk-tools")
        extend_dev_requirements(
            filtered_dev_req_path,
            ["../../../tools/azure-sdk-tools", "../../../tools/azure-devtools"],
        )
        logging.info(
            "Installing filtered dev requirements from {}".format(filtered_dev_req_path)
        )
        run_check_call(
            [python_executable, "-m", "pip", "install", "-r", filtered_dev_req_path],
            dependent_pkg_path,
        )
    else:
        logging.info("No dev requirements found to install")

    # install the dependent package that is being verified
    run_check_call(
        [python_executable, "-m", "pip", "install", dependent_pkg_path], temp_dir
    )

def update_change_log(
    setup_py_location, version, service, package, is_unreleased, replace_latest_entry_title, release_date=None
):
    script = os.path.join(root_dir, "eng", "common", "scripts", "Update-ChangeLog.ps1")
    pkg_root = os.path.abspath(os.path.join(setup_py_location, ".."))
    commands = [
        "pwsh",
        script,
        "--Version",
        version,
        "--ServiceDirectory",
        service,
        "--PackageName",
        package,
        "--Unreleased:${}".format(is_unreleased),
        "--ReplaceLatestEntryTitle:${}".format(replace_latest_entry_title),
    ]
    if release_date is not None:
        commands.append("--ReleaseDate")
        commands.append(release_date)

    # Run script to update change log
    run_check_call(commands, pkg_root)

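# A minimal usage sketch for update_change_log above. All argument values here are
# hypothetical and shown only to illustrate the call shape implied by the signature;
# they are not taken from the repo.
#
#   update_change_log(
#       "sdk/core/azure-core/setup.py",   # hypothetical setup.py location
#       "1.0.0",                          # version to record
#       "core",                           # service directory
#       "azure-core",                     # package name
#       is_unreleased=False,
#       replace_latest_entry_title=True,
#       release_date="2021-01-01",        # optional; omitted when None
#   )
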
def _install_packages(self, dependent_pkg_path, pkg_to_exclude):
    python_executable = self.context.venv.python_executable
    working_dir = self.context.package_root_path
    temp_dir = self.context.temp_path

    # install dev requirements, skipping the already installed package that is being tested
    filtered_dev_req_path = filter_dev_requirements(
        dependent_pkg_path, [pkg_to_exclude], dependent_pkg_path
    )

    if filtered_dev_req_path:
        logging.info(
            "Installing filtered dev requirements from {}".format(filtered_dev_req_path)
        )
        run_check_call(
            [python_executable, "-m", "pip", "install", "-r", filtered_dev_req_path],
            dependent_pkg_path,
        )
    else:
        logging.info("No dev requirements found to install")

    # install the dependent package that is being verified
    run_check_call(
        [python_executable, "-m", "pip", "install", dependent_pkg_path], temp_dir
    )

def prep_and_run_tests(targeted_packages, python_version):
    print('running test setup for {}'.format(targeted_packages))
    run_check_call(
        [
            python_version,
            dev_setup_script_location,
            '-p',
            ','.join([os.path.basename(package_path) for package_path in targeted_packages]),
        ],
        root_dir,
    )

    print('Setup complete. Running pytest for {}'.format(targeted_packages))
    command_array = [python_version, '-m', 'pytest']
    command_array.extend(targeted_packages)
    run_check_call(command_array, root_dir, ALLOWED_RETURN_CODES)

def git_checkout_tag(tag_name, working_dir):
    # fetch tags
    run_check_call(["git", "fetch", "origin", "tag", tag_name], working_dir)

    logging.info("checkout git repo with tag {}".format(tag_name))
    commands = ["git", "checkout", "tags/{}".format(tag_name)]
    run_check_call(commands, working_dir)
    logging.info("Code with tag {} is checked out successfully".format(tag_name))

def prep_tests(targeted_packages):
    logging.info("running test setup for {}".format(targeted_packages))
    run_check_call(
        [
            sys.executable,
            dev_setup_script_location,
            "--disabledevelop",
            "-p",
            ",".join([os.path.basename(p) for p in targeted_packages]),
        ],
        root_dir,
    )

def build_packages(targeted_packages, distribution_directory):
    # run the build and distribution
    for package_name in targeted_packages:
        print(package_name)
        print('Generating Package Using Python {}'.format(sys.version))
        run_check_call(
            ['python', build_packing_script_location, '--dest', distribution_directory, package_name],
            root_dir,
        )

def clone_repo(dest_dir, repo_url):
    if not os.path.isdir(dest_dir):
        logging.error("Invalid destination directory to clone git repo:[{}]".format(dest_dir))
        sys.exit(1)

    logging.info("cloning git repo using url {}".format(repo_url))
    run_check_call(["git", "clone", "--depth=1", repo_url], dest_dir)

def prep_and_run_tests(targeted_packages, python_version, test_res):
    print('running test setup for {}'.format(targeted_packages))
    run_check_call(
        [python_version, dev_setup_script_location, '-p', ','.join(targeted_packages)],
        root_dir,
    )

    print('Setup complete. Running pytest for {}'.format(targeted_packages))
    command_array = [python_version, '-m', 'pytest']
    command_array.extend(test_res)
    command_array.extend(targeted_packages)
    run_check_call(command_array, root_dir, ALLOWED_RETURN_CODES)

def checkout_code_repo(tag_name, working_dir):
    # fetch tags
    run_check_call(["git", "fetch", "--all", "--tags"], working_dir)

    logging.info("checkout git repo with tag {}".format(tag_name))
    commands = ["git", "checkout", "tags/{}".format(tag_name)]
    run_check_call(commands, working_dir)
    logging.info("Code with tag {} is checked out successfully".format(tag_name))

def _run_test(self, dep_pkg_path):
    self.context.initialize(dep_pkg_path)

    # find GA released tags for package and run test using that code base
    dep_pkg_name, version, _, _ = parse_setup(dep_pkg_path)
    release_tag = get_release_tag(dep_pkg_name, self.context.is_latest_depend_test)
    if not release_tag:
        logging.error("Release tag is not available. Skipping package {} from test".format(dep_pkg_name))
        return

    test_branch_name = "{0}_tests".format(release_tag)
    try:
        git_checkout_branch(test_branch_name, dep_pkg_path)
    except:
        # If git checkout failed for "tests" branch then checkout branch with release tag
        logging.info(
            "Failed to checkout branch {}. Checking out release tagged git repo".format(test_branch_name)
        )
        git_checkout_tag(release_tag, dep_pkg_path)

    try:
        # install packages required to run tests
        run_check_call(
            [
                self.context.venv.python_executable,
                "-m",
                "pip",
                "install",
                "-r",
                test_tools_req_file,
            ],
            dep_pkg_path,
        )

        # Install pre-built whl for current package.
        install_package_from_whl(
            self.whl_path,
            self.context.temp_path,
            self.context.venv.python_executable,
        )

        # install dependent package from source
        self._install_packages(dep_pkg_path, self.context.package_name)

        # try install of pre-built whl for current package again. if unnecessary, pip does nothing.
        # we do this to ensure that the correct development version is installed. on non-dev builds
        # this step will just skip through.
        install_package_from_whl(
            self.whl_path,
            self.context.temp_path,
            self.context.venv.python_executable,
        )

        self._execute_test(dep_pkg_path)
    finally:
        self.context.deinitialize(dep_pkg_path)

def generate_coverage_xml():
    coverage_path = os.path.join(root_dir, ".coverage")
    if os.path.exists(coverage_path):
        logging.info("Generating coverage XML")
        commands = ["coverage", "xml", "-i", "--omit", '"*test*,*example*"']
        run_check_call(commands, root_dir, always_exit=False)
    else:
        logging.error("Coverage file is not available in {} to generate coverage XML".format(coverage_path))

def execute_tox_serial(tox_command_tuples):
    for index, cmd_tuple in enumerate(tox_command_tuples):
        tox_dir = os.path.join(cmd_tuple[1], "./.tox/")

        logging.info(
            "Running tox for {}. {} of {}.".format(
                os.path.basename(cmd_tuple[1]), index + 1, len(tox_command_tuples)
            )
        )

        run_check_call(cmd_tuple[0], cmd_tuple[1])

        if in_ci():
            shutil.rmtree(tox_dir)

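# Note on inputs (an assumption inferred from how the tuples are consumed above):
# each entry in tox_command_tuples is expected to be a two-item tuple of
# (command_argument_list, package_directory), e.g.
#
#   (["tox", "-c", "../../../eng/tox/tox.ini"], "/path/to/sdk/core/azure-core")
#
# where the command is executed with the package directory as its working directory,
# and that directory's .tox folder is cleaned up afterwards when running in CI.
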
def prep_tests(targeted_packages, python_version):
    logging.info("running test setup for {}".format(targeted_packages))
    run_check_call(
        [
            python_version,
            dev_setup_script_location,
            "-p",
            ",".join([os.path.basename(p) for p in targeted_packages]),
        ],
        root_dir,
    )

def initialize(self, dep_pkg_root_path):
    self.dep_pkg_root_path = dep_pkg_root_path
    self.venv.clear_venv()

    # install test tools requirements outside virtual environment to access pypi_tools package
    run_check_call(
        [
            sys.executable,
            "-m",
            "pip",
            "install",
            "-r",
            test_tools_req_file,
        ],
        self.package_root_path,
    )

def update_change_log(setup_py_location, version, is_unreleased, replace_version):
    script = os.path.join(root_dir, "eng", "common", "Update-Change-Log.ps1")
    pkg_root = os.path.abspath(os.path.join(setup_py_location, ".."))
    commands = [
        "pwsh",
        script,
        "--Version",
        version,
        "--ChangeLogPath",
        pkg_root,
        "--Unreleased",
        str(is_unreleased),
        "--ReplaceVersion",
        str(replace_version),
    ]

    # Run script to update change log
    run_check_call(commands, pkg_root)

def prep_and_run_tests(targeted_packages, python_version, test_res):
    print('running test setup for {}'.format(targeted_packages))
    run_check_call(
        [
            python_version,
            dev_setup_script_location,
            '-p',
            ','.join([os.path.basename(p) for p in targeted_packages]),
        ],
        root_dir,
    )

    # if we are only targeting management-plane packages, it is possible that no tests
    # run at all, which is acceptable. pytest exits with code 5 when it collects no
    # tests, so we explicitly allow that return code here.
    if all(map(lambda x: any([pkg_id in x for pkg_id in MANAGEMENT_PACKAGE_IDENTIFIERS]), targeted_packages)):
        ALLOWED_RETURN_CODES.append(5)

    print('Setup complete. Running pytest for {}'.format(targeted_packages))
    command_array = [python_version, '-m', 'pytest']
    command_array.extend(test_res)
    command_array.extend(targeted_packages)
    run_check_call(command_array, root_dir, ALLOWED_RETURN_CODES, True)

def build_packages(targeted_packages, distribution_directory):
    # run the build and distribution
    for package_name in targeted_packages:
        print(package_name)
        print("Generating Package Using Python {}".format(sys.version))
        run_check_call(
            [
                "python",
                build_packing_script_location,
                "--dest",
                distribution_directory,
                package_name,
            ],
            root_dir,
        )

def combine_coverage_files(coverage_files):
    # find tox.ini file. tox.ini is used to combine coverage paths to generate a formatted report
    tox_ini_file = os.path.join(root_dir, "eng", "tox", "tox.ini")
    config_file_flag = "--rcfile={}".format(tox_ini_file)

    if os.path.isfile(tox_ini_file):
        # for every individual coverage file, run coverage combine to remap its paths
        for coverage_file in coverage_files:
            cov_cmd_array = [sys.executable, "-m", "coverage", "combine"]
            # tox.ini has the coverage path mappings to combine
            # pass tox.ini as the coverage config file
            cov_cmd_array.extend([config_file_flag, coverage_file])
            run_check_call(cov_cmd_array, root_dir)
    else:
        # not a hard error at this point
        # this combine step is only required when the report contains package paths that start with .tox
        logging.error("tox.ini is not found in path {}".format(root_dir))

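# For reference: the "[coverage:paths]" section referred to above is coverage.py's
# standard path-aliasing configuration. A minimal sketch of what such a section in
# eng/tox/tox.ini could look like (illustrative only; the real file may differ):
#
#   [coverage:paths]
#   source =
#       azure
#       .tox/*/lib/python*/site-packages/azure
#
# The first entry is the canonical path; "coverage combine" rewrites any file path
# matching the later patterns to it, so the report shows repo-relative paths instead
# of .tox site-packages paths.
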
def run_tests(targeted_packages, python_version, test_res):
    # if we are only targeting management-plane packages, it is possible that no tests
    # run at all, which is acceptable. pytest exits with code 5 when it collects no
    # tests, so we explicitly allow that return code here.
    if all(
        map(
            lambda x: any([pkg_id in x for pkg_id in MANAGEMENT_PACKAGE_IDENTIFIERS]),
            targeted_packages,
        )
    ):
        ALLOWED_RETURN_CODES.append(5)

    print('Running pytest for {}'.format(targeted_packages))
    command_array = [python_version, '-m', 'pytest']
    command_array.extend(test_res)
    command_array.extend(targeted_packages)
    run_check_call(command_array, root_dir, ALLOWED_RETURN_CODES, True)

def check_diff(folder):
    # We don't care about changes to txt files (dev_requirements change)
    run_check_call(["git", "status"], sdk_dir, always_exit=False)
    command = [
        "git",
        "checkout",
        "--",
        "**/*.txt",
    ]
    result = run_check_call(command, sdk_dir, always_exit=False)

    # Remove the whl dirs
    command = ["rm", "-r", "**/.tmp_whl_dir/"]
    result = run_check_call(command, sdk_dir, always_exit=False)

    # Next we need to move the autorest and _tox_logs directories and then replace them
    dir_changed = folder.split("/")[:-2]
    command = [
        "git",
        "diff",
        "--exit-code",
        "{}".format("/".join(dir_changed)),
    ]
    result = run_check_call(command, sdk_dir, always_exit=False)
    if result:
        command = ["git", "status"]
        run_check_call(command, root_dir)
        raise ValueError(
            "Found difference between re-generated code and current commit. Please re-generate with the latest autorest."
        )

def build_whl_for_req(req, package_path):
    if ".." in req:
        # Create temp path if it doesn't exist
        temp_dir = os.path.join(package_path, ".tmp_whl_dir")
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)

        req_pkg_path = os.path.abspath(os.path.join(package_path, req.replace("\n", "")))
        pkg_name, version, _, _ = parse_setup(req_pkg_path)
        logging.info("Building wheel for package {}".format(pkg_name))
        run_check_call([sys.executable, "setup.py", "bdist_wheel", "-d", temp_dir], req_pkg_path)

        whl_path = find_whl(pkg_name, version, temp_dir)
        logging.info("Wheel for package {0} is {1}".format(pkg_name, whl_path))
        logging.info("Replacing dev requirement. Old requirement:{0}, New requirement:{1}".format(req, whl_path))
        return whl_path
    else:
        return req

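# Illustrative caller for build_whl_for_req (hypothetical; not part of this module):
# relative-path requirements such as "../azure-core" are replaced with the path of a
# freshly built wheel, while normal PyPI requirement lines pass through unchanged.
#
#   with open(dev_req_path) as f:
#       new_reqs = [build_whl_for_req(line, package_path) for line in f if line.strip()]
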
def check_diff(folder):
    # We don't care about changes to txt files (dev_requirements change)
    run_check_call(["git", "status"], sdk_dir, always_exit=False)
    dir_changed = folder.split("/")[:-2]
    command = [
        "git",
        "diff",
        "--exit-code",
        "{}".format("/".join(dir_changed)),
    ]
    result = run_check_call(command, sdk_dir, always_exit=False)
    if result:
        command = ["git", "status"]
        run_check_call(command, root_dir)
        raise ValueError(
            "Found difference between formatted code and current commit. Please re-generate with the latest autorest."
        )

def _run_test(self, dep_pkg_path):
    self.context.initialize(dep_pkg_path)

    # find GA released tags for package and run test using that code base
    dep_pkg_name, version, _, _ = parse_setup(dep_pkg_path)
    release_tag = get_release_tag(dep_pkg_name, self.context.is_latest_depend_test)
    if not release_tag:
        logging.error("Release tag is not available. Skipping package {} from test".format(dep_pkg_name))
        return

    test_branch_name = "{0}_tests".format(release_tag)
    try:
        git_checkout_branch(test_branch_name, dep_pkg_path)
    except:
        # If git checkout failed for "tests" branch then checkout branch with release tag
        logging.info(
            "Failed to checkout branch {}. Checking out release tagged git repo".format(test_branch_name)
        )
        git_checkout_tag(release_tag, dep_pkg_path)

    try:
        # install packages required to run tests
        run_check_call(
            [
                self.context.venv.python_executable,
                "-m",
                "pip",
                "install",
                "-r",
                test_tools_req_file,
            ],
            dep_pkg_path,
        )

        # Install pre-built whl for current package
        install_package_from_whl(
            self.whl_path,
            self.context.temp_path,
            self.context.venv.python_executable,
        )

        # install package to be tested and run pytest
        self._execute_test(dep_pkg_path)
    finally:
        self.context.deinitialize(dep_pkg_path)

def build_packages(targeted_packages, distribution_directory, is_dev_build=False):
    # run the build and distribution
    for package_root in targeted_packages:
        service_hierarchy = os.path.join(os.path.basename(package_root))
        if is_dev_build:
            verify_update_package_requirement(package_root)
        print("Generating Package Using Python {}".format(sys.version))
        run_check_call(
            [
                sys.executable,
                build_packing_script_location,
                "--dest",
                os.path.join(distribution_directory, service_hierarchy),
                package_root,
            ],
            root_dir,
        )

def build_packages(targeted_packages, distribution_directory, is_dev_build=False):
    # run the build and distribution
    for package_root in targeted_packages:
        print(package_root)
        if is_dev_build:
            verify_update_package_requirement(package_root)
        print("Generating Package Using Python {}".format(sys.version))
        run_check_call(
            [
                "python",
                build_packing_script_location,
                "--dest",
                distribution_directory,
                package_root,
            ],
            root_dir,
        )

def run_autorest(service_dir):
    logging.info("Running autorest for {}".format(service_dir))
    service_dir = os.path.join(sdk_dir, service_dir)
    swagger_folders = find_swagger_folders(service_dir)

    for working_dir in swagger_folders:
        os.chdir(working_dir)
        f = os.path.abspath(os.path.join(working_dir, "README.md"))
        if os.path.exists(f):
            reset_command = ["autorest", "--reset"]
            run_check_call(reset_command, root_dir)
            command = ["autorest", "--python", f, "--verbose"]
            logging.info("Command: {}\nLocation: {}\n".format(command, working_dir))
            run_check_call(command, working_dir)

    return swagger_folders

def _run_test(self, dep_pkg_path):
    self.context.initialize(dep_pkg_path)

    # find GA released tags for package and run test using that code base
    dep_pkg_name, _, _, _ = parse_setup(dep_pkg_path)
    release_tag = get_release_tag(dep_pkg_name, self.context.is_latest_depend_test)
    if not release_tag:
        logging.error("Release tag is not available. Skipping package {} from test".format(dep_pkg_name))
        return

    # Get code repo with released tag of dependent package
    checkout_code_repo(release_tag, dep_pkg_path)

    try:
        # install packages required to run tests
        run_check_call(
            [
                self.context.venv.python_executable,
                "-m",
                "pip",
                "install",
                "-r",
                test_tools_req_file,
            ],
            self.context.package_root_path,
        )

        # Install pre-built whl for current package
        install_package_from_whl(
            self.context.package_name,
            self.context.pkg_version,
            self.context.whl_directory,
            self.context.temp_path,
            self.context.venv.python_executable,
        )

        # install package to be tested and run pytest
        self._execute_test(dep_pkg_path)
    finally:
        self.context.deinitialize(dep_pkg_path)