def write_mock_config(self, filename):
    """Retrieve mock config from Koji instance

    :param filename: output filename to write mock config
    """
    target = self.config_options.koji_build_target
    arch = self.config_options.koji_arch
    try:
        worker_id = multiprocessing.current_process()._identity[0]
    except IndexError:
        # Not in multiprocessing mode
        worker_id = 1
    run_cmd = [self.exe_name]
    run_cmd.extend(['mock-config', '--arch', arch, '--target', target,
                    '-o', filename])
    # FIXME(hguemar): add proper exception management
    sh.env(run_cmd, _env={'PATH': '/usr/bin/'})

    lines = []
    with open(filename, 'r') as fp:
        for line in fp.readlines():
            if (line.startswith("config_opts['chroot_setup_cmd']") and
                    self.config_options.mock_base_packages != ''):
                lines.append("config_opts['chroot_setup_cmd'] = "
                             "'install %s'\n" %
                             self.config_options.mock_base_packages)
            elif line.startswith("config_opts['root']"):
                # Append worker id to mock buildroot name
                line = line[:-2] + "-" + str(worker_id) + "'\n"
                lines.append(line)
            else:
                lines.append(line)

    with open(filename, 'w') as fp:
        fp.write(''.join(lines))
def _build_with_exe(self, package_name, output_dir, src_rpm, scratch,
                    commit):
    """Build using koji/brew executables (cbs being an alias)

    :param package_name: package name to build
    :param output_dir: output directory
    :param src_rpm: source RPM to build
    :param scratch: define if build is scratch or not
    """
    krb_principal = self.config_options.koji_krb_principal
    keytab_file = self.config_options.koji_krb_keytab
    scratch = self.config_options.koji_scratch_build
    target = self.config_options.koji_build_target

    # Build package using koji/brew
    run_cmd = [self.exe_name]
    if krb_principal:
        run_cmd.extend(['--principal', krb_principal,
                        '--keytab', keytab_file])
    run_cmd.extend(['build', '--wait', target, src_rpm])

    build_exception = None
    with io.open("%s/kojibuild.log" % output_dir, 'a',
                 encoding='utf-8', errors='replace') as self.koji_fp:
        try:
            sh.env(run_cmd, _err=self._process_koji_output,
                   _out=self._process_koji_output,
                   _cwd=output_dir, scratch=scratch,
                   _env={'PATH': '/usr/bin/'})
        except Exception as e:
            build_exception = e

    return build_exception, "%s/kojibuild.log" % output_dir
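A minimal sketch of the _out/_err callback pattern used above (not from the original driver; the command and callback name are placeholders): sh hands each line of the child's output to the given Python function, which is how the example tees koji output into kojibuild.log.

import sh

def _collect(line):
    # Each line of stdout/stderr is passed to this callback as it is produced
    print('child output: %s' % line.rstrip())

sh.env(['uname', '-a'], _out=_collect, _err=_collect,
       _env={'PATH': '/usr/bin/'})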
def test_gerador_lero_lero():
    ''' same as the previous test '''
    gerador_filename = abspath(join(dirname(__file__),
                                    '../gerador_lero_lero.py'))
    sh.env('python', gerador_filename, _bg=True)
    pid = get_pid(gerador_filename)
    assert pid
    sh.kill(pid)
    assert not get_pid(gerador_filename)
def test_app():
    ''' the pid is not obtained correctly when the test is run,
    but it works while debugging '''
    app_filename = abspath(join(dirname(__file__), '../app.py'))
    sh.env('python', app_filename, _bg=True)
    pid = get_pid(app_filename)
    assert pid
    sh.kill('-s', 'SIGUSR1', pid)
    assert get_pid(app_filename)
    sh.kill(pid)
    assert not get_pid(app_filename)
def run(program, commit, env_vars, dev_mode, use_public, bootstrap,
        do_build=True):
    datadir = os.path.realpath(config_options.datadir)
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)
    commit_hash = commit.commit_hash
    project_name = commit.project_name
    repo_dir = commit.repo_dir

    if do_build:
        # If yum repo already exists remove it and assume we're starting fresh
        if os.path.exists(yumrepodir_abs):
            shutil.rmtree(yumrepodir_abs)
        os.makedirs(yumrepodir_abs)

        sh.git("--git-dir", "%s/.git" % repo_dir,
               "--work-tree=%s" % repo_dir,
               "reset", "--hard", commit_hash)

    run_cmd = []
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    if (dev_mode or use_public):
        run_cmd.append("DLRN_DEV=1")

    if bootstrap is True:
        run_cmd.append("REPO_BOOTSTRAP=1")

    run_cmd.extend([program,
                    config_options.target, project_name,
                    os.path.join(datadir, yumrepodir),
                    datadir, config_options.baseurl])

    if not do_build:
        logger.info('Running %s' % ' '.join(run_cmd))

    try:
        sh_version = SemanticVersion.from_pip_string(sh.__version__)
        min_sh_version = SemanticVersion.from_pip_string('1.09')
        if sh_version > min_sh_version:
            sh.env(run_cmd)
        else:
            sh.env_(run_cmd)
    except Exception as e:
        logger.error('cmd failed. See logs at: %s/%s/' % (datadir,
                                                          yumrepodir))
        raise e
def run(program, cp, commit, env_vars, dev_mode, use_public, bootstrap,
        do_build=True):
    datadir = os.path.realpath(cp.get("DEFAULT", "datadir"))
    target = cp.get("DEFAULT", "target")
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)
    baseurl = cp.get("DEFAULT", "baseurl")
    commit_hash = commit.commit_hash
    project_name = commit.project_name
    repo_dir = commit.repo_dir

    if do_build:
        # If yum repo already exists remove it and assume we're starting fresh
        if os.path.exists(yumrepodir_abs):
            shutil.rmtree(yumrepodir_abs)
        os.makedirs(yumrepodir_abs)

        sh.git("--git-dir", "%s/.git" % repo_dir,
               "--work-tree=%s" % repo_dir,
               "reset", "--hard", commit_hash)

    run_cmd = []
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    if (dev_mode or use_public):
        run_cmd.append("DELOREAN_DEV=1")

    if bootstrap is True:
        run_cmd.append("REPO_BOOTSTRAP=1")

    run_cmd.extend([program, target, project_name,
                    os.path.join(datadir, yumrepodir),
                    datadir, baseurl])

    if not do_build:
        logger.info('Running %s' % ' '.join(run_cmd))

    try:
        sh_version = SemanticVersion.from_pip_string(sh.__version__)
        min_sh_version = SemanticVersion.from_pip_string('1.09')
        if sh_version > min_sh_version:
            sh.env(run_cmd)
        else:
            sh.env_(run_cmd)
    except Exception as e:
        logger.error('cmd failed. See logs at: %s/%s/' % (datadir,
                                                          yumrepodir))
        raise e
def submit_review(commit, env_vars):
    datadir = os.path.realpath(config_options.datadir)
    scriptsdir = os.path.realpath(config_options.scriptsdir)
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    project_name = commit.project_name

    run_cmd = []
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    run_cmd.extend([os.path.join(scriptsdir, "submit_review.sh"),
                    project_name, os.path.join(datadir, yumrepodir),
                    datadir, config_options.baseurl])
    sh.env(run_cmd)
def run(program, commit, env_vars, dev_mode, use_public, bootstrap,
        do_build=True, version_from=None):
    config_options = getConfigOptions()
    datadir = os.path.realpath(config_options.datadir)
    yumrepodir = _get_yumrepodir(commit)
    yumrepodir_abs = os.path.join(datadir, yumrepodir)
    project_name = commit.project_name
    repo_dir = commit.repo_dir

    if do_build:
        # If yum repo already exists remove it and assume we're starting fresh
        if os.path.exists(yumrepodir_abs):
            shutil.rmtree(yumrepodir_abs)
        os.makedirs(yumrepodir_abs)

    if version_from:
        logger.info('Taking tags to define version from %s' % version_from)
        git = sh.git.bake(_cwd=repo_dir, _tty_out=False)
        git.merge('-s', 'ours', '-m', '"fake merge tags"', version_from)

    run_cmd = []
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    run_cmd.extend([program,
                    config_options.target, project_name,
                    os.path.join(datadir, yumrepodir),
                    datadir, config_options.baseurl,
                    os.path.realpath(commit.distgit_dir)])

    if not do_build:
        logger.info('Running %s' % ' '.join(run_cmd))

    try:
        sh.env(run_cmd, _err=process_mock_output, _out=process_mock_output)
    except Exception as e:
        # This *could* have changed during the build, see kojidriver.py
        datadir = os.path.realpath(config_options.datadir)
        yumrepodir = _get_yumrepodir(commit)
        logger.error('cmd failed. See logs at: %s/%s/' % (datadir,
                                                          yumrepodir))
        raise e
def submit_review(cp, commit, env_vars):
    datadir = os.path.realpath(cp.get("DEFAULT", "datadir"))
    scriptsdir = os.path.realpath(cp.get("DEFAULT", "scriptsdir"))
    baseurl = cp.get("DEFAULT", "baseurl")
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    project_name = commit.project_name

    run_cmd = []
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    run_cmd.extend([os.path.join(scriptsdir, "submit_review.sh"),
                    project_name, os.path.join(datadir, yumrepodir),
                    datadir, baseurl])
    sh.env(run_cmd)
def run_external_preprocess(**kwargs):
    # Initially, get any params to be set as environment variables
    pkgname = kwargs.get('pkgname')
    distgit = kwargs.get('distgit')
    upstream_distgit = kwargs.get('upstream_distgit')
    cmdline = kwargs.get('cmdline')
    distroinfo = kwargs.get('distroinfo')
    srcdir = kwargs.get('source_dir')
    commit_hash = kwargs.get('commit_hash')
    username = os.environ.get('USER', None)

    run_cmd = []
    # Append environment variables
    if pkgname:
        run_cmd.append("DLRN_PACKAGE_NAME=%s" % pkgname)
    if distgit:
        run_cmd.append("DLRN_DISTGIT=%s" % distgit)
    if upstream_distgit:
        run_cmd.append("DLRN_UPSTREAM_DISTGIT=%s" % upstream_distgit)
    if distroinfo:
        run_cmd.append("DLRN_DISTROINFO_REPO=%s" % distroinfo)
    if srcdir:
        run_cmd.append("DLRN_SOURCEDIR=%s" % srcdir)
    if commit_hash:
        run_cmd.append("DLRN_SOURCE_COMMIT=%s" % commit_hash)
    if username:
        run_cmd.append("DLRN_USER=%s" % username)
    run_cmd.extend([cmdline])

    logger.info('Running custom pre-process: %s' % ' '.join(run_cmd))
    try:
        # We are forcing LANG to be C here, because env decides to use
        # non-ascii characters when the command is not found in UTF-8
        # environments
        sh.env(run_cmd, _cwd=distgit, _env={'LANG': 'C'})
    except Exception as e:
        msg = getattr(e, 'stderr', None)
        if msg:
            msg = msg.decode('utf-8')
        else:
            msg = e
        raise RuntimeError('Custom pre-process failed: %s' % msg)
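A minimal sketch of the NAME=value prefix convention used throughout these examples (the script path and values below are hypothetical): because the underlying command is env(1), any leading NAME=value arguments become environment variables for the command that follows them.

import sh

# Hypothetical values, mirroring how run_external_preprocess() builds run_cmd
run_cmd = ['DLRN_PACKAGE_NAME=example-pkg', 'DLRN_USER=builder',
           './custom-preprocess.sh']
# Roughly equivalent to the shell command:
#   env DLRN_PACKAGE_NAME=example-pkg DLRN_USER=builder ./custom-preprocess.sh
sh.env(run_cmd, _env={'LANG': 'C'})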
def submit_review(commit, packages, env_vars):
    config_options = getConfigOptions()
    datadir = os.path.realpath(config_options.datadir)
    scriptsdir = os.path.realpath(config_options.scriptsdir)
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    project_name = commit.project_name

    for pkg in packages:
        if project_name == pkg['name']:
            break
    else:
        logger.error('Unable to find info for project'
                     ' %s' % project_name)
        return

    url = (get_commit_url(commit, pkg) + commit.commit_hash)
    env_vars.append('GERRIT_URL=%s' % url)
    env_vars.append('GERRIT_LOG=%s/%s' % (config_options.baseurl,
                                          commit.getshardedcommitdir()))
    maintainers = ','.join(pkg['maintainers'])
    env_vars.append('GERRIT_MAINTAINERS=%s' % maintainers)
    env_vars.append('GERRIT_TOPIC=%s' % config_options.gerrit_topic)

    logger.info('Creating a gerrit review using '
                'GERRIT_URL=%s '
                'GERRIT_MAINTAINERS=%s ' %
                (url, maintainers))

    run_cmd = []
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    run_cmd.extend([os.path.join(scriptsdir, "submit_review.sh"),
                    project_name, os.path.join(datadir, yumrepodir),
                    datadir, config_options.baseurl,
                    os.path.realpath(commit.distgit_dir)])
    sh.env(run_cmd, _timeout=300)
def write_mock_config(self, filename):
    """Retrieve mock config from Koji instance

    :param filename: output filename to write mock config
    """
    target = self.config_options.koji_build_target
    arch = self.config_options.koji_arch
    try:
        worker_id = multiprocessing.current_process()._identity[0]
    except IndexError:
        # Not in multiprocessing mode
        worker_id = 1
    run_cmd = [self.exe_name]
    run_cmd.extend(['mock-config', '--arch', arch, '--target', target,
                    '-o', filename])
    # FIXME(hguemar): add proper exception management
    sh.env(run_cmd, _env={'PATH': '/usr/bin/'})

    lines = []
    with open(filename, 'r') as fp:
        for line in fp.readlines():
            if (line.startswith("config_opts['chroot_setup_cmd']") and
                    self.config_options.mock_base_packages != ''):
                lines.append("config_opts['chroot_setup_cmd'] = "
                             "'install %s'\n" %
                             self.config_options.mock_base_packages)
            elif line.startswith("config_opts['root']"):
                # Append worker id to mock buildroot name
                line = line[:-2] + "-" + str(worker_id) + "'\n"
                lines.append(line)
            else:
                lines.append(line)

    if self.config_options.mock_package_manager:
        lines.append("config_opts['package_manager'] = '%s'\n" %
                     self.config_options.mock_package_manager)

    with open(filename, 'w') as fp:
        fp.write(''.join(lines))
def _run(self, script_data):
    tmp_script = self._tempfile
    tmp_script_path = Path(tmp_script[-1])
    tmp_script_path.write_text(script_data, encoding="utf8")
    self._make_executable(tmp_script_path)
    os.close(tmp_script[0])
    try:
        for line in sh.env(str(tmp_script_path),
                           _env=app.env.copy(),
                           _iter=True,
                           _bg_exc=False):
            app.log.debug(f"run :: {line.strip()}")
    except sh.ErrorReturnCode as error:
        raise SpecProcessException(
            f"Failure to bootstrap: {error.stderr.decode().strip()}")
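A minimal sketch of the streaming pattern above (the command is just a stand-in): with _iter=True, sh.env() yields the child's output line by line while it runs instead of buffering it until exit, which is what lets the example log each line as it arrives.

import sh

# Stream output from a running command; 'uptime' is only an example
for line in sh.env('uptime', _iter=True):
    print(line.strip())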
def build_package(self, **kwargs):
    """Valid parameters:

    :param output_directory: directory where the SRPM is located,
                             and the built packages will be.
    :param additional_mock_opts: string with additional options to be
                                 passed to mock.
    """
    output_dir = kwargs.get('output_directory')
    additional_mock_opts = kwargs.get('additional_mock_opts')
    datadir = os.path.realpath(self.config_options.datadir)
    mock_config = os.environ.get('MOCK_CONFIG')
    install_after_build = self.config_options.install_after_build

    # Find src.rpm
    for rpm in os.listdir(output_dir):
        if rpm.endswith(".src.rpm"):
            src_rpm = '%s/%s' % (output_dir, rpm)
    try:
        # And build package
        with io.open("%s/mock.log" % output_dir, 'a',
                     encoding='utf-8', errors='replace') as self.mock_fp:
            try:
                mock_opts = ['-v', '-r', '%s/%s' % (datadir, mock_config),
                             '--resultdir', output_dir]
                if additional_mock_opts:
                    mock_opts += [additional_mock_opts]
                mock_opts += ['--rebuild', src_rpm]
                sh.env('/usr/bin/mock', *mock_opts,
                       postinstall=install_after_build,
                       _err=self._process_mock_output,
                       _out=self._process_mock_output)
            except Exception as e:
                raise e

        if install_after_build:
            # Check for warning about built packages failing to install
            with open("%s/mock.log" % output_dir, 'r') as fp:
                mock_content = fp.readlines()

            warn_match = re.compile(
                r'\W*WARNING: Failed install built packages.*')
            for line in mock_content:
                m = warn_match.match(line)
                if m is not None:
                    raise Exception('Failed to install built packages')

        # All went fine, create the $OUTPUT_DIRECTORY/installed file
        open('%s/installed' % output_dir, 'a').close()
    finally:
        with open("%s/mock.log" % output_dir, 'r') as fp:
            mock_content = fp.readlines()

        # Append mock output to rpmbuild.log
        with open('%s/rpmbuild.log' % output_dir, 'a') as fp:
            for line in mock_content:
                fp.write(line)

        # Finally run restorecon
        try:
            sh.restorecon('-Rv', output_dir)
        except Exception as e:
            logger.info('restorecon did not run correctly, %s' % e)
def build(cp, package_info, commit, env_vars, dev_mode, use_public):
    # Set the build timestamp to now
    commit.dt_build = int(time())

    datadir = os.path.realpath(cp.get("DEFAULT", "datadir"))
    scriptsdir = os.path.realpath(cp.get("DEFAULT", "scriptsdir"))
    target = cp.get("DEFAULT", "target")
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)
    baseurl = cp.get("DEFAULT", "baseurl")
    commit_hash = commit.commit_hash
    project_name = commit.project_name
    repo_dir = commit.repo_dir

    # If yum repo already exists remove it and assume we're starting fresh
    if os.path.exists(yumrepodir_abs):
        shutil.rmtree(yumrepodir_abs)
    os.makedirs(yumrepodir_abs)

    sh.git("--git-dir", "%s/.git" % repo_dir,
           "--work-tree=%s" % repo_dir,
           "reset", "--hard", commit_hash)

    run_cmd = []
    # expand the env name=value pairs into docker arguments
    if env_vars:
        for env_var in env_vars:
            run_cmd.append(env_var)

    if (dev_mode or use_public):
        run_cmd.append("DELOREAN_DEV=1")

    run_cmd.extend([os.path.join(scriptsdir, "build_rpm_wrapper.sh"),
                    target, project_name,
                    os.path.join(datadir, yumrepodir),
                    datadir, baseurl])
    try:
        sh.env(run_cmd)
    except Exception as e:
        logger.error('cmd failed. See logs at: %s/%s/' % (datadir,
                                                          yumrepodir))
        raise e

    built_rpms = []
    for rpm in os.listdir(yumrepodir_abs):
        if rpm.endswith(".rpm"):
            built_rpms.append(os.path.join(yumrepodir, rpm))
    if not built_rpms:
        raise Exception("No rpms built for %s" % project_name)

    notes = "OK"
    if not os.path.isfile(os.path.join(yumrepodir_abs, "installed")):
        logger.error('Build failed. See logs at: %s/%s/' % (datadir,
                                                            yumrepodir))
        raise Exception("Error installing %s" % project_name)

    packages = [package["name"] for package in package_info["packages"]]
    for otherproject in packages:
        if otherproject == project_name:
            continue
        last_success = session.query(Commit).\
            filter(Commit.project_name == otherproject).\
            filter(Commit.status == "SUCCESS").\
            order_by(desc(Commit.id)).first()
        if not last_success:
            continue
        rpms = last_success.rpms.split(",")
        for rpm in rpms:
            rpm_link_src = os.path.join(yumrepodir_abs,
                                        os.path.split(rpm)[1])
            os.symlink(os.path.relpath(os.path.join(datadir, rpm),
                                       yumrepodir_abs), rpm_link_src)

    sh.createrepo(yumrepodir_abs)

    with open(os.path.join(
            yumrepodir_abs,
            "%s.repo" % cp.get("DEFAULT", "reponame")), "w") as fp:
        fp.write("[%s]\nname=%s-%s-%s\nbaseurl=%s/%s\nenabled=1\n"
                 "gpgcheck=0\npriority=1" % (cp.get("DEFAULT", "reponame"),
                                             cp.get("DEFAULT", "reponame"),
                                             project_name, commit_hash,
                                             cp.get("DEFAULT", "baseurl"),
                                             commit.getshardedcommitdir()))

    current_repo_dir = os.path.join(datadir, "repos", "current")
    os.symlink(os.path.relpath(yumrepodir_abs,
                               os.path.join(datadir, "repos")),
               current_repo_dir + "_")
    os.rename(current_repo_dir + "_", current_repo_dir)

    return built_rpms, notes
def build_package(self, **kwargs):
    """Valid parameters:

    :param output_directory: directory where the SRPM is located,
                             and the built packages will be.
    """
    output_dir = kwargs.get('output_directory')
    coprid = self.config_options.coprid

    # Find src.rpm
    for rpm in os.listdir(output_dir):
        if rpm.endswith(".src.rpm"):
            src_rpm = '%s/%s' % (output_dir, rpm)
    try:
        # Build package using copr
        run_cmd = []
        run_cmd.extend([self.exe_name, 'build', coprid, src_rpm])

        build_exception = None
        with io.open("%s/coprbuild.log" % output_dir, 'a',
                     encoding='utf-8', errors='replace') as self.copr_fp:
            try:
                sh.env(run_cmd, _err=self._process_copr_output,
                       _out=self._process_copr_output,
                       _cwd=output_dir, _env={'PATH': '/usr/bin/'})
            except Exception as e:
                build_exception = e

        # Find build id to download logs
        with open("%s/coprbuild.log" % output_dir, 'r') as fp:
            log_content = fp.readlines()

        build_id = None
        for line in log_content:
            m = re.search(r'^Created builds: (\d+)$', line)
            if m:
                logger.info("Created build id %s" % m.group(1))
                build_id = m.group(1)
                break
        if not build_id:
            raise Exception('Failed to find build id for the copr build')

        # Download build artifacts and logs
        ddir = "%s/%s" % (output_dir, build_id)
        run_cmd = []
        run_cmd.extend([self.exe_name, 'download-build',
                        '-d', ddir, build_id])

        with io.open("%s/coprdownload.log" % output_dir, 'a',
                     encoding='utf-8', errors='replace') as self.copr_fp:
            try:
                sh.env(run_cmd, _err=self._process_copr_output,
                       _out=self._process_copr_output,
                       _cwd=output_dir, _env={'PATH': '/usr/bin/'})
            except Exception as e:
                raise e

        # Move selected downloaded files into output_dir
        exts_filter = ['.rpm', '.log.gz']
        # The download directory should contain only a single directory,
        # named after the build target
        target_name = os.listdir(ddir)[0]
        target_dir = os.path.join(ddir, target_name)
        # Copy only the files we care about
        for f in os.listdir(target_dir):
            if any([f.endswith(ft) for ft in exts_filter]):
                src = os.path.join(target_dir, f)
                dst = os.path.join(output_dir, f)
                logger.info("Copying %s to %s" % (src, dst))
                shutil.copy(src, dst)

        # Remove download directory
        logger.info("Removing %s" % ddir)
        shutil.rmtree(ddir)

        # All went fine, create the $OUTPUT_DIRECTORY/installed file
        open('%s/installed' % output_dir, 'a').close()
    finally:
        # Finally run restorecon
        try:
            sh.restorecon('-Rv', output_dir)
        except Exception as e:
            logger.info('restorecon did not run correctly, %s' % e)

    # We only want to raise the build exception at the very end, after
    # downloading all relevant artifacts
    if build_exception:
        raise build_exception
def build_package(self, **kwargs):
    """Valid parameters:

    :param output_directory: directory where the SRPM is located,
                             and the built packages will be.
    :param package_name: name of a package to build
    """
    output_dir = kwargs.get('output_directory')
    package_name = kwargs.get('package_name')
    commit = kwargs.get('commit')
    scratch = self.config_options.koji_scratch_build
    build_exception = None

    # Find src.rpm
    for rpm in os.listdir(output_dir):
        if rpm.endswith(".src.rpm"):
            src_rpm = os.path.realpath('%s/%s' % (output_dir, rpm))
    try:
        if self.config_options.koji_use_rhpkg:
            build_method = self._build_with_rhpkg
        else:
            build_method = self._build_with_exe

        build_exception, logfile = build_method(package_name, output_dir,
                                                src_rpm, scratch, commit)

        if self.config_options.koji_use_rhpkg:
            # In this case, we need to re-calculate the output directory
            datadir = os.path.realpath(self.config_options.datadir)
            output_dir = os.path.join(datadir, "repos",
                                      commit.getshardedcommitdir())

        # Find task id to download logs
        with open(logfile, 'r') as fp:
            log_content = fp.readlines()

        task_id = None
        for line in log_content:
            m = re.search(r'^Created task: (\d+)$', line)
            if m:
                logger.info("Created task id %s" % m.group(1))
                task_id = m.group(1)
                break
        if not task_id:
            raise Exception('Failed to find task id for the koji build')

        # Download build artifacts and logs
        run_cmd = []
        run_cmd.extend([self.exe_name, 'download-task', '--logs', task_id])

        with io.open("%s/build_download.log" % output_dir, 'a',
                     encoding='utf-8', errors='replace') as self.koji_fp:
            try:
                sh.env(run_cmd, _err=self._process_koji_output,
                       _out=self._process_koji_output,
                       _cwd=output_dir, _env={'PATH': '/usr/bin/'})
            except Exception as e:
                raise e

        # All went fine, create the $OUTPUT_DIRECTORY/installed file
        open('%s/installed' % output_dir, 'a').close()
    finally:
        # Finally run restorecon
        try:
            sh.restorecon('-Rv', output_dir)
        except Exception as e:
            logger.info('restorecon did not run correctly, %s' % e)

    # We only want to raise the build exception at the very end, after
    # downloading all relevant artifacts
    if build_exception:
        raise build_exception
def build_package(self, **kwargs):
    """Valid parameters:

    :param output_directory: directory where the SRPM is located,
                             and the built packages will be.
    :param package_name: name of a package to build
    """
    output_dir = kwargs.get('output_directory')
    package_name = kwargs.get('package_name')
    commit = kwargs.get('commit')
    scratch = self.config_options.koji_scratch_build
    build_exception = None

    # Find src.rpm
    for rpm in os.listdir(output_dir):
        if rpm.endswith(".src.rpm"):
            src_rpm = os.path.realpath('%s/%s' % (output_dir, rpm))
    try:
        if self.config_options.koji_use_rhpkg:
            build_method = self._build_with_rhpkg
        else:
            build_method = self._build_with_exe

        build_exception, logfile = build_method(package_name, output_dir,
                                                src_rpm, scratch, commit)

        if self.config_options.koji_use_rhpkg:
            # In this case, we need to re-calculate the output directory
            datadir = os.path.realpath(self.config_options.datadir)
            output_dir = os.path.join(datadir, "repos",
                                      commit.getshardedcommitdir())

        # Find task id to download logs
        with open(logfile, 'r') as fp:
            log_content = fp.readlines()

        task_id = None
        for line in log_content:
            m = re.search(r'^Created task: (\d+)$', line)
            if m:
                logger.info("Created task id %s" % m.group(1))
                task_id = m.group(1)
                break
        if not task_id:
            raise Exception('Failed to find task id for the koji build')

        # Also find package name if we need to add tags
        if len(self.config_options.koji_add_tags) > 0:
            # Get build name
            m = re.search(r'([0-9a-zA-Z._+-]+)\.src\.rpm', src_rpm)
            package_nvr = None
            if m:
                logger.info("Adding tags for %s" % m.group(1))
                package_nvr = m.group(1)
            if not package_nvr:
                raise Exception('Failed to find package nvr when tagging')

            for tag in self.config_options.koji_add_tags:
                run_cmd = []
                run_cmd.extend([self.exe_name, 'tag-build', tag,
                                package_nvr])

                with io.open("%s/additional_tags.log" % output_dir, 'a',
                             encoding='utf-8',
                             errors='replace') as self.koji_fp:
                    try:
                        sh.env(run_cmd, _err=self._process_koji_output,
                               _out=self._process_koji_output,
                               _cwd=output_dir,
                               _env={'PATH': '/usr/bin/'})
                    except Exception as e:
                        raise e

        # Download build artifacts and logs
        run_cmd = []
        run_cmd.extend([self.exe_name, 'download-task', '--logs', task_id])

        with io.open("%s/build_download.log" % output_dir, 'a',
                     encoding='utf-8', errors='replace') as self.koji_fp:
            try:
                sh.env(run_cmd, _err=self._process_koji_output,
                       _out=self._process_koji_output,
                       _cwd=output_dir, _env={'PATH': '/usr/bin/'})
            except Exception as e:
                raise e

        # All went fine, create the $OUTPUT_DIRECTORY/installed file
        open('%s/installed' % output_dir, 'a').close()
    finally:
        # Finally run restorecon
        try:
            sh.restorecon('-Rv', output_dir)
        except Exception as e:
            logger.info('restorecon did not run correctly, %s' % e)

    # We only want to raise the build exception at the very end, after
    # downloading all relevant artifacts
    if build_exception:
        raise build_exception
    ''' produce a string representation '''
    path = self._path
    path, _ = os.path.split(path)
    path, arch = os.path.split(path)
    path, _ = os.path.split(path)
    _, repo = os.path.split(path)
    return "%s/%s/%s" % (repo, arch, self.pkgname)

# pkgbuild_schema_strings
# pkgbuild_schema_arrays
# pkgbuild_schema_arch_arrays
# pkgbuild_schema_package_overrides
SRCINFO_VALUE = sh.env('-i', 'bash', '-c', '''
. /usr/share/makepkg/util/schema.sh
echo -n "${pkgbuild_schema_strings[@]}"
''').split()
SRCINFO_LIST = sh.env('-i', 'bash', '-c', '''
. /usr/share/makepkg/util/schema.sh
echo -n "${pkgbuild_schema_arrays[@]}"
''').split()
for ARCH in CONFIG.parabola.arches:
    SRCINFO_LIST.extend(['%s_%s' % (v, ARCH) for v in sh.env(
        '-i', 'bash', '-c', '''
. /usr/share/makepkg/util/schema.sh
echo -n "${pkgbuild_schema_arch_arrays[@]}"
''').split()])