def __init__(self, name, *urls):
    """
    Following `url` schemes are supported: http://, ftp://, file://
    (used if full path is passed).
    """

    super(Yum, self).__init__()
    # Pre-define the attribute so it always exists, even if the config
    # lookup does not assign it (consistent with the other repo plugins).
    self.yum_root = None
    self.load_config_to_self("yum_root", ["yum.root"], "/")
    self.name = name
    self.urls = urls
    self.yum_base = yum.YumBase()
    self.yum_base.doConfigSetup(init_plugins=False, debuglevel=0,
                                root=self.yum_root)
    # Keep yum's cache out of the system location.
    self.yum_base.conf.cachedir = get_temp_dir("yum")
    self.yum_base.disablePlugins()
    # Only the repositories added below should be active.
    self.yum_base.repos.disableRepo("*")

    for i, url in enumerate(urls):
        if url.startswith("/"):
            url = "file://{0}".format(url)

        # call str() on self.url, because if self.url is unicode,
        # list_packages will crash on el6
        self.yum_base.add_enable_repo("faf_{0}-{1}".format(self.name, i),
                                      baseurls=[str(url)])
def _tar_xz(self, archive_name, archive_dir, filepaths, unlink=True) -> None:
    """
    Pack `filepaths` into the XZ-compressed tarball `archive_name` inside
    `archive_dir`. If the archive already exists, its contents are merged
    in (already-archived entries win on name conflicts). When `unlink` is
    True, the caller-supplied `filepaths` are removed afterwards.

    Raises FafError if the temporary staging directory already exists.
    """

    archive_path = os.path.join(archive_dir, archive_name)
    # Build under a .tmp name and rename at the end so a partially written
    # archive never replaces a good one.
    archive_path_tmp = os.path.join(archive_dir, "{0}.tmp".format(archive_name))
    tmpdir = get_temp_dir()
    tmpsubdir = os.path.join(tmpdir, "archive")
    # Snapshot the caller's paths now - `filepaths` is extended below with
    # entries from a pre-existing archive, and those must NOT be unlinked.
    unlink_paths = list(filepaths)
    try:
        os.makedirs(tmpsubdir)
    except OSError as ex:
        if ex.errno == errno.EEXIST:
            raise FafError("The directory '{0}' already exists".format(
                tmpsubdir)) from ex
        raise

    untar = None
    if os.path.isfile(archive_path):
        self.log_info("An existing archive found, will merge the contents")
        # Extract the old archive and prepend its files so existing data
        # takes precedence in the symlink-merge loop below.
        untar = self._untar_xz(archive_path)
        with os.scandir(untar) as iterator:
            for entry in iterator:
                if entry.is_dir() and entry.name.endswith("archive"):
                    filepaths = [
                        os.path.join(entry.path, f)
                        for f in os.listdir(entry.path)
                    ] + filepaths
                    break

    self.log_info("Creating symlinks")
    for filepath in filepaths:
        linkpath = os.path.join(tmpsubdir, os.path.basename(filepath))

        # merge - do not overwrite already archived data
        try:
            self.log_debug("%s ~> %s", filepath, linkpath)
            os.symlink(filepath, linkpath)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

            self.log_debug("Already exists")

    self.log_info("Running tar")
    # 'h' makes tar follow the symlinks so real file contents are archived.
    safe_popen("tar", "chJf", archive_path_tmp, "-C", tmpdir, "archive")
    # Atomic publish of the finished archive.
    os.rename(archive_path_tmp, archive_path)

    self.log_info("Cleaning up")
    if untar is not None:
        shutil.rmtree(untar, ignore_errors=True)

    if unlink:
        for path in unlink_paths:
            os.unlink(path)

    shutil.rmtree(tmpsubdir)
def unpack_rpm_to_tmp(rpm_path: str, prefix: str = "faf") -> str:
    """
    Unpack an RPM package to a temp directory and return the directory path.

    The directory is either specified in storage.tmpdir config option
    or use the system default temp directory.

    Raises FafError if the rpm2cpio | cpio pipeline fails or times out;
    the partially unpacked directory is removed in that case.
    """

    tmpdir = get_temp_dir("rpm")
    result = tempfile.mkdtemp(prefix=prefix, dir=tmpdir)

    # Pre-create the usual FHS directories plus /bin -> usr/bin style
    # symlinks so payloads using either layout unpack into a single tree.
    for dirname in ["bin", "lib", "lib64", "sbin"]:
        os.makedirs(os.path.join(result, "usr", dirname))
        os.symlink(os.path.join("usr", dirname),
                   os.path.join(result, dirname))

    with Popen(["/usr/bin/rpm2cpio", rpm_path],
               stdout=PIPE, stderr=PIPE) as rpm2cpio, \
         Popen(["/usr/bin/cpio", "-id", "--quiet"],
               stdin=rpm2cpio.stdout, stderr=PIPE, cwd=result) as cpio:
        #FIXME: false positive by pylint
        # pylint: disable=fixme
        # Drop our handle on the pipe so cpio sees EOF when rpm2cpio exits.
        rpm2cpio.stdout.close()  # pylint: disable=no-member
        try:
            # generous timeout of 15 minutes (kernel unpacking)
            cpio.communicate(timeout=900)
        except TimeoutExpired:
            cpio.kill()
            cpio.communicate()
        finally:
            # Also check rpm2cpio: a corrupt RPM can make it fail while
            # cpio still exits 0 on the truncated stream.
            rpm2cpio.wait()
            if cpio.returncode != 0 or rpm2cpio.returncode != 0:
                shutil.rmtree(result)
                raise FafError(f"Failed to unpack RPM '{rpm_path}'")

    return result
def __init__(self, name, *urls):
    """
    Following `url` schemes are supported: http://, ftp://, file://
    (used if full path is passed).
    """

    super(Dnf, self).__init__()
    self.dnf_root = None
    self.load_config_to_self("dnf_root", ["dnf.root"], "/")
    self.name = name
    self.urls = urls
    self.dnf_base = dnf.Base()
    self.dnf_base.conf.debuglevel = 0
    self.dnf_base.conf.installroot = self.dnf_root
    self.dnf_base.conf.cachedir = get_temp_dir("dnf")
    self.dnf_base.read_all_repos()
    # Only the repositories added below should be active.
    self.dnf_base.repos.all().disable()

    # Add repositories
    for i, url in enumerate(urls):
        if isinstance(url, six.string_types):
            if url.startswith("/"):
                url = "file://{0}".format(url)

            # call str() on url, because if url is unicode,
            # list_packages will crash on el6
            self.dnf_base.repos.add_new_repo("faf_{0}-{1}".format(
                self.name, i),
                                             self.dnf_base.conf,
                                             baseurl=[str(url)],
                                             skip_if_unavailable=False)
        else:
            # `url` is a list of mirrors; use the first reachable one.
            for url_single in url:
                if url_single.startswith("/"):
                    url_single = "file://{0}".format(url_single)

                try:
                    # We only probe for liveness; close the response
                    # explicitly so the connection is not leaked.
                    response = request.urlopen(
                        os.path.join(url_single, "repodata/repomd.xml"))
                    response.close()
                    self.dnf_base.repos.add_new_repo(
                        "faf_{0}-{1}".format(self.name, i),
                        self.dnf_base.conf,
                        baseurl=[url_single],
                        skip_if_unavailable=False)
                    break
                except:  # pylint: disable=bare-except
                    pass
            else:
                self.log_error("No mirrors available")
                raise NameError('NoMirrorsAvailable')

    # A sack is required by marking methods and dependency resolving
    try:
        self.dnf_base.fill_sack()
    except dnf.exceptions.RepoError:
        self.log_error("Repo error")
def __init__(self, name, *urls) -> None:
    """
    Following `url` schemes are supported: http://, ftp://, file://
    (used if full path is passed).
    """

    super().__init__()
    self.dnf_metadata_expire = "24h"
    self.load_config_to_self("dnf_metadata_expire",
                             ["dnf.metadata_expire"],
                             default="24h")
    self.dnf_root = None
    self.load_config_to_self("dnf_root", ["dnf.root"], "/")
    self.name = name
    self.urls = urls

    base = dnf.Base()
    base.conf.debuglevel = 0
    base.conf.installroot = self.dnf_root
    base.conf.cachedir = get_temp_dir("dnf")
    base.conf.metadata_expire = self.dnf_metadata_expire
    self.dnf_base = base

    # Register one repository per positional argument. Each argument is
    # either a single URL or an iterable of mirror URLs.
    for index, url in enumerate(urls):
        repo_id = "faf_{0}-{1}".format(self.name, index)

        if isinstance(url, str):
            if url.startswith("/"):
                url = "file://{0}".format(url)

            # call str() on url, because if url is unicode,
            # list_packages will crash on el6
            base.repos.add_new_repo(repo_id,
                                    base.conf,
                                    baseurl=[str(url)],
                                    skip_if_unavailable=True)
            continue

        # Mirror list: register the first one that answers.
        for mirror in url:
            if mirror.startswith("/"):
                mirror = "file://{0}".format(mirror)

            try:
                with request.urlopen(
                        os.path.join(mirror, "repodata/repomd.xml")):
                    # Ignore the result. We only want to know if the URL is live.
                    pass
                base.repos.add_new_repo(repo_id,
                                        base.conf,
                                        baseurl=[mirror],
                                        skip_if_unavailable=True)
                break
            except:  # pylint: disable=bare-except
                pass
        else:
            self.log_error("No mirrors available")
            raise NameError('NoMirrorsAvailable')
def _tar_xz(self, archive_name, archive_dir, filepaths, unlink=True):
    """
    Pack `filepaths` into the XZ-compressed tarball `archive_name` inside
    `archive_dir`. If the archive already exists, its contents are merged
    in (already-archived entries win on name conflicts). When `unlink` is
    True, the caller-supplied `filepaths` are removed afterwards.

    Raises FafError if the temporary staging directory already exists.
    """

    archive_path = os.path.join(archive_dir, archive_name)
    # Build under a .tmp name and rename at the end so a partially written
    # archive never replaces a good one.
    archive_path_tmp = os.path.join(archive_dir,
                                    "{0}.tmp".format(archive_name))
    tmpdir = get_temp_dir()
    tmpsubdir = os.path.join(tmpdir, "archive")
    # Snapshot the caller's paths now - `filepaths` is extended below with
    # entries from a pre-existing archive, and those must NOT be unlinked.
    unlink_paths = list(filepaths)
    try:
        os.makedirs(tmpsubdir)
    except OSError as ex:
        if ex.errno == errno.EEXIST:
            # Chain the original exception for easier debugging.
            raise FafError("The directory '{0}' already exists"
                           .format(tmpsubdir)) from ex
        raise

    untar = None
    if os.path.isfile(archive_path):
        self.log_info("An existing archive found, will merge the contents")
        # Extract the old archive and prepend its files so existing data
        # takes precedence in the symlink-merge loop below.
        untar = self._untar_xz(archive_path)
        for filename in [os.path.join(untar, f) for f in os.listdir(untar)]:
            if os.path.isdir(filename) and filename.endswith("archive"):
                filepaths = [os.path.join(filename, f)
                             for f in os.listdir(filename)] + filepaths
                break

    self.log_info("Creating symlinks")
    for filepath in filepaths:
        linkpath = os.path.join(tmpsubdir, os.path.basename(filepath))

        # merge - do not overwrite already archived data
        try:
            # Lazy %-args: the message is only formatted when debug
            # logging is actually enabled.
            self.log_debug("%s ~> %s", filepath, linkpath)
            os.symlink(filepath, linkpath)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

            self.log_debug("Already exists")

    self.log_info("Running tar")
    # 'h' makes tar follow the symlinks so real file contents are archived.
    safe_popen("tar", "chJf", archive_path_tmp, "-C", tmpdir, "archive")
    # Atomic publish of the finished archive.
    os.rename(archive_path_tmp, archive_path)

    self.log_info("Cleaning up")
    if untar is not None:
        shutil.rmtree(untar, ignore_errors=True)

    if unlink:
        for path in unlink_paths:
            os.unlink(path)

    shutil.rmtree(tmpsubdir)
def __init__(self, name, *urls):
    """
    Following `url` schemes are supported: http://, ftp://, file://
    (used if full path is passed).
    """

    super(Yum, self).__init__()
    self.yum_root = None
    self.load_config_to_self("yum_root", ["yum.root"], "/")
    self.name = name
    self.urls = urls
    self.yum_base = yum.YumBase()  # pylint: disable=no-member
    self.yum_base.doConfigSetup(init_plugins=False, debuglevel=0,
                                root=self.yum_root)
    self.yum_base.conf.cachedir = get_temp_dir("yum")
    self.yum_base.disablePlugins()
    # Only the repositories added below should be active.
    self.yum_base.repos.disableRepo("*")

    for i, url in enumerate(urls):
        if isinstance(url, six.string_types):
            if url.startswith("/"):
                url = "file://{0}".format(url)

            # call str() on url, because if url is unicode,
            # list_packages will crash on el6
            self.yum_base.add_enable_repo("faf_{0}-{1}".format(
                self.name, i),
                                          baseurls=[str(url)])
        else:
            # `url` is a list of mirrors; use the first reachable one.
            for url_single in url:
                if url_single.startswith("/"):
                    url_single = "file://{0}".format(url_single)

                try:
                    # We only probe for liveness; close the response
                    # explicitly so the connection is not leaked.
                    response = request.urlopen(
                        os.path.join(url_single, "repodata/repomd.xml"))
                    response.close()
                    # Use the same "faf_" repo id prefix as the single-URL
                    # branch (was "faf-", which broke the naming scheme).
                    self.yum_base.add_enable_repo("faf_{0}-{1}".format(
                        self.name, i),
                                                  baseurls=[url_single])
                    break
                except:  # pylint: disable=bare-except
                    pass
            else:
                self.log_error("No mirrors available")
                raise NameError('NoMirrorsAvailable')
def __init__(self, name, *urls):
    """
    Following `url` schemes are supported: http://, ftp://, file://
    (used if full path is passed).
    """

    super(Yum, self).__init__()
    self.yum_root = None
    self.load_config_to_self("yum_root", ["yum.root"], "/")
    self.name = name
    self.urls = urls
    self.yum_base = yum.YumBase()
    self.yum_base.doConfigSetup(init_plugins=False, debuglevel=0,
                                root=self.yum_root)
    self.yum_base.conf.cachedir = get_temp_dir("yum")
    self.yum_base.disablePlugins()
    # Only the repositories added below should be active.
    self.yum_base.repos.disableRepo("*")

    for i, url in enumerate(urls):
        if isinstance(url, six.string_types):
            if url.startswith("/"):
                url = "file://{0}".format(url)

            # call str() on url, because if url is unicode,
            # list_packages will crash on el6
            self.yum_base.add_enable_repo("faf_{0}-{1}".format(self.name, i),
                                          baseurls=[str(url)])
        else:
            # `url` is a list of mirrors; use the first reachable one.
            for url_single in url:
                if url_single.startswith("/"):
                    url_single = "file://{0}".format(url_single)

                try:
                    # We only probe for liveness; close the response
                    # explicitly so the connection is not leaked.
                    response = request.urlopen(
                        os.path.join(url_single, "repodata/repomd.xml"))
                    response.close()
                    # Use the same "faf_" repo id prefix as the single-URL
                    # branch (was "faf-", which broke the naming scheme).
                    self.yum_base.add_enable_repo(
                        "faf_{0}-{1}".format(self.name, i),
                        baseurls=[url_single])
                    break
                except:  # pylint: disable=bare-except
                    pass
            else:
                self.log_error("No mirrors available")
                raise NameError('NoMirrorsAvailable')
def _untar_xz(self, archive):
    """
    Extract the XZ-compressed tarball `archive` into a fresh temporary
    directory and return that directory's path.
    """

    workdir = tempfile.mkdtemp(dir=get_temp_dir(),
                               prefix=os.path.basename(archive))
    safe_popen("tar", "xJf", archive, "-C", workdir)
    return workdir