def __init__(
    self,
    url,
    cache_dir,
    pkg_dir,
    proxy_addr="",
    proxy_user="",
    proxy_pass="",
    gpg_verify=True,
):
    """
    Initialize a Deb repository accessor.

    :param url: repository URL (may point below a '/dists/' subtree)
    :param cache_dir: directory for cached metadata; created if missing
    :param pkg_dir: directory where downloaded packages are stored
    :param proxy_addr: optional proxy address ('' disables)
    :param proxy_user: optional proxy user name
    :param proxy_pass: optional proxy password
    :param gpg_verify: whether downloaded metadata must be GPG-verified
    """
    self.url = url
    # Everything left of the last '/dists/' is the repository root.
    base = url.rsplit('/dists/', 1)[0]
    # urljoin() only resolves relative paths correctly against a base
    # that ends with a slash, so normalize before storing.
    if base[-1] != '/':
        base += '/'
    self.base_url = [base]
    # 'urls' intentionally aliases 'base_url' (same list object).
    self.urls = self.base_url
    # No client-side SSL material by default.
    self.sslclientcert = self.sslclientkey = self.sslcacert = None
    self.proxy = proxy_addr
    self.proxy_username = proxy_user
    self.proxy_password = proxy_pass
    self.gpg_verify = gpg_verify
    self.basecachedir = cache_dir
    if not os.path.isdir(self.basecachedir):
        # NOTE(review): user string appears sanitized upstream ('******');
        # kept byte-identical to the original call.
        fileutils.makedirs(self.basecachedir, user='******', group='www')
    # Package include/exclude filters start empty.
    self.includepkgs = []
    self.exclude = []
    self.pkgdir = pkg_dir
    self.http_headers = {}
def setup_repo(self, repo, no_mirrors, ca_cert_file, client_cert_file, client_key_file):
    """
    Set up the dnf repository object and fetch its metadata.

    :param repo: dnf repo object to configure
    :param no_mirrors: if True, skip mirrorlist expansion and use baseurl only
    :param ca_cert_file: path to the CA certificate, or None
    :param client_cert_file: path to the SSL client certificate, or None
    :param client_key_file: path to the SSL client key, or None
    """
    repo.metadata_expire = 0
    repo.mirrorlist = self.url
    repo.baseurl = [self.url]
    pkgdir = os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR, self.org, 'stage')
    if not os.path.isdir(pkgdir):
        fileutils.makedirs(pkgdir, user=APACHE_USER, group=APACHE_GROUP)
    repo.pkgdir = pkgdir
    repo.sslcacert = ca_cert_file
    repo.sslclientcert = client_cert_file
    repo.sslclientkey = client_key_file
    # Reset proxy settings, then apply the configured proxy (default to
    # http:// when no scheme was given).
    repo.proxy = None
    repo.proxy_username = None
    repo.proxy_password = None
    if self.proxy_url:
        repo.proxy = self.proxy_url if '://' in self.proxy_url else 'http://' + self.proxy_url
        repo.proxy_username = self.proxy_user
        repo.proxy_password = self.proxy_pass
    if no_mirrors:
        repo.mirrorlist = ""
    # Short, stable id derived from the URL, used for cache paths.
    self.digest = hashlib.sha256(self.url.encode('utf8')).hexdigest()[:16]
    self.dnfbase.repos.add(repo)
    self.repoid = repo.id
    try:
        # Silence dnf's noisy logging while metadata is fetched.
        logger = logging.getLogger('dnf')
        logger.setLevel(logging.ERROR)
        # BUGFIX: was 'self.yumbase.repos[...]' — this class only has
        # 'dnfbase' (see every other reference in this method), so the
        # mirrorlist path raised AttributeError instead of loading.
        self.dnfbase.repos[self.repoid].load()
        logger.setLevel(logging.WARN)
    except RepoError:
        # Dnf bug workaround. Mirrorlist was provided but none worked.
        # Fall back to baseurl and load again. Remove once dnf is fixed
        # and add detection if mirrors failed.
        logger.setLevel(logging.WARN)
        repo.mirrorlist = ""
        no_mirrors = True
        self.dnfbase.repos[self.repoid].load()
    # Do not try to expand baseurl to other mirrors
    if no_mirrors:
        self.dnfbase.repos[self.repoid].urls = repo.baseurl
        # Make sure baseurl ends with / and urljoin will work correctly
        if self.dnfbase.repos[self.repoid].urls[0][-1] != '/':
            self.dnfbase.repos[self.repoid].urls[0] += '/'
    else:
        self.dnfbase.repos[self.repoid].urls = self.clean_urls(
            self.dnfbase.repos[self.repoid]._repo.getMirrors())  # pylint: disable=W0212
        # Drop mirror URLs carrying query strings; they break later
        # urljoin-based path construction.
        self.dnfbase.repos[self.repoid].urls = [
            url for url in self.dnfbase.repos[self.repoid].urls if '?' not in url
        ]
    self.dnfbase.repos[self.repoid].basecachedir = os.path.join(CACHE_DIR, self.org)
    # dnf exposes no repoXML attribute; fake an empty one for callers
    # that expect the yum-style interface.
    repoXML = type('', (), {})()
    repoXML.repoData = {}
    self.dnfbase.repos[self.repoid].repoXML = repoXML
def __init__(self, root, url, org):
    """
    Initialize a repository rooted at *root* for organization *org*.

    :param root: local root directory for this repo; created if missing
    :param url: repository base URL
    :param org: organization id/label, used for cache and package paths
    """
    self.root = root
    # urljoin() needs a trailing slash on the base URL to resolve
    # relative paths correctly, so normalize before storing.
    normalized = url if url[-1] == '/' else url + '/'
    self.baseurl = [normalized]
    # 'urls' intentionally aliases 'baseurl' (same list object).
    self.urls = self.baseurl
    self.basecachedir = os.path.join(CACHE_DIR, org)
    self.pkgdir = os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR, org, 'stage')
    # Make sure the root and staging directories exist.
    for needed_dir in (self.root, self.pkgdir):
        if not os.path.isdir(needed_dir):
            fileutils.makedirs(needed_dir, user='******', group='www')
    self.is_configured = False
    # Package include/exclude filters start empty.
    self.includepkgs = []
    self.exclude = []
def __init__(
    self,
    url,
    cache_dir,
    pkg_dir,
    proxy_addr="",
    proxy_user="",
    proxy_pass="",
    gpg_verify=True,
    channel_label=None,
):
    """
    Initialize a Deb repository accessor.

    Supports special query parameters on the URL:
    ``uyuni_suite``, ``uyuni_component`` and ``uyuni_arch``. When
    ``uyuni_suite`` is present, the URL path is rewritten to the standard
    Debian layout ``<base>/dists/<suite>/<component>/binary-<arch>`` (the
    ``dists`` and ``binary-*`` parts are skipped for flat repos, i.e.
    when the suite contains a ``/``). If no arch was given, it is derived
    from the channel architecture stored in the database.

    :param url: repository URL, possibly carrying uyuni_* query params
    :param cache_dir: metadata cache directory; created if missing
    :param pkg_dir: package download directory
    :param proxy_addr: optional proxy address ('' disables)
    :param proxy_user: optional proxy user name
    :param proxy_pass: optional proxy password
    :param gpg_verify: whether metadata must be GPG-verified
    :param channel_label: channel label used for the DB arch lookup
    """
    self.url = url
    # Everything left of the last '/dists/' is the repository root.
    parts = url.rsplit('/dists/', 1)
    self.base_url = [parts[0]]
    parsed_url = urlparse.urlparse(url)
    query = urlparse.parse_qsl(parsed_url.query)
    # Strip the uyuni_* parameters out of the query string, remembering
    # their values; everything else is kept for the final URL.
    new_query = []
    suite = None
    component = None
    arch = None
    for qi in query:
        if qi[0] == "uyuni_suite":
            suite = qi[1]
        elif qi[0] == "uyuni_component":
            component = qi[1]
        elif qi[0] == "uyuni_arch":
            arch = qi[1]
        else:
            new_query.append(qi)
    if suite:
        # Rebuild the URL without the uyuni_* params; this is the base.
        parsed_url = parsed_url._replace(
            query=urlparse.urlencode(new_query))
        base_url = urlparse.urlunparse(parsed_url)
        path_list = parsed_url.path.split("/")
        log2(0, 0, "Base URL: {}".format(base_url))
        log2(0, 0, "Suite: {}".format(suite))
        log2(0, 0, "Component: {}".format(component))
        # A '/' in the suite marks a flat repository: no 'dists' level.
        if "/" not in suite:
            path_list.append("dists")
        path_list.extend(suite.split("/"))
        if component:
            path_list.extend(component.split("/"))
        if "/" not in suite:
            if arch is None:
                # No explicit arch given: derive it from the channel's
                # architecture label (e.g. 'channel-amd64-deb').
                rhnSQL.initDB()
                h = rhnSQL.prepare("""
                    SELECT ca.label AS arch_label
                    FROM rhnChannel AS c
                    LEFT JOIN rhnChannelArch AS ca ON c.channel_arch_id = ca.id
                    WHERE c.label = :channel_label
                """)
                h.execute(channel_label=channel_label)
                row = h.fetchone_dict()
                if row and "arch_label" in row:
                    aspl = row["arch_label"].split("-")
                    # Only 'channel-<arch>-deb' labels are usable here.
                    if len(aspl) == 3 and aspl[0] == "channel" and aspl[
                            2] == "deb":
                        # Map internal arch names to Debian arch names.
                        arch_trans = {
                            "ia32": "i386",
                            "arm": "armhf",
                        }
                        if aspl[1] in arch_trans:
                            arch = arch_trans[aspl[1]]
                        else:
                            arch = aspl[1]
            if arch:
                log2(0, 0, "Channel architecture: {}".format(arch))
                path_list.append("binary-{}".format(arch))
        # Drop empty path components so the join yields no '//'.
        while "" in path_list:
            path_list.remove("")
        parsed_url = parsed_url._replace(path="/".join(path_list))
        # Both the effective URL and the base URL are replaced by the
        # rewritten ones.
        self.url = url = urlparse.urlunparse(parsed_url)
        self.base_url = [base_url]
    # Make sure baseurl ends with / and urljoin will work correctly
    if self.base_url[0][-1] != '/':
        self.base_url[0] += '/'
    # 'urls' intentionally aliases 'base_url' (same list object).
    self.urls = self.base_url
    # No client-side SSL material by default.
    self.sslclientcert = self.sslclientkey = self.sslcacert = None
    self.proxy = proxy_addr
    self.proxy_username = proxy_user
    self.proxy_password = proxy_pass
    self.gpg_verify = gpg_verify
    self.basecachedir = cache_dir
    if not os.path.isdir(self.basecachedir):
        fileutils.makedirs(self.basecachedir, user='******', group='www')
    # Package include/exclude filters start empty.
    self.includepkgs = []
    self.exclude = []
    self.pkgdir = pkg_dir
    self.http_headers = {}
def _safe_create(fname, user, group, mode): """ This function returns a file descriptor for the open file fname If the file is already there, it is truncated otherwise, all the directories up to it are created and the file is created as well. """ # There can be race conditions between the moment we check for the file # existence and when we actually create it, so retry if something fails tries = 5 while tries: tries = tries - 1 # we're really picky about what can we do if os.access(fname, os.F_OK): # file exists if not os.access(fname, os.R_OK | os.W_OK): raise UnreadableFileError() fd = os.open(fname, os.O_WRONLY | os.O_TRUNC) # We're done return fd # If directory does not exist, attempt to create it dirname = os.path.dirname(fname) if not os.path.isdir(dirname): try: #os.makedirs(dirname, 0755) makedirs(dirname, mode, user, group) except OSError: e = sys.exc_info()[1] # There is a window between the moment we check the disk and # the one we try to create the directory # We double-check the file existance here if not (e.errno == EEXIST and os.path.isdir(dirname)): # If the exception was thrown on a parent dir # check the subdirectory to go through next loop. if os.path.isdir(e.filename): continue # Pass exception through raise except: # Pass exception through raise # If we got here, it means the directory exists # file does not exist, attempt to create it # we pass most of the exceptions through try: fd = os.open(fname, os.O_WRONLY | os.O_CREAT | os.O_EXCL, int('0644', 8)) except OSError: e = sys.exc_info()[1] # The file may be already there if e.errno == EEXIST and os.access(fname, os.F_OK): # Retry continue # Pass exception through raise # If we got here, the file is created, so break out of the loop setPermsPath(fname, user, group, mode) return fd # Ran out of tries; something is fishy # (if we manage to create or truncate the file, we've returned from the # function already) raise RuntimeError("Attempt to create file %s failed" % fname)
def setup_repo(self, repo, no_mirrors, ca_cert_file, client_cert_file, client_key_file):
    """
    Configure the given dnf repository object and fetch its metadata.

    :param repo: dnf repo object to configure
    :param no_mirrors: if True, skip mirror expansion and use baseurl only
    :param ca_cert_file: path to the CA certificate, or None
    :param client_cert_file: path to the SSL client certificate, or None
    :param client_key_file: path to the SSL client key, or None
    :raises RepoMDError: when dnf fails to load the repository metadata
    """
    repo.metadata_expire = 0
    repo.mirrorlist = self.url
    repo.baseurl = [self.url]
    with cfg_component('server.satellite') as CFG:
        pkgdir = os.path.join(CFG.MOUNT_POINT, CFG.PREPENDED_DIR, self.org, 'stage')
    if not os.path.isdir(pkgdir):
        fileutils.makedirs(pkgdir, user=APACHE_USER, group=APACHE_GROUP)
    repo.pkgdir = pkgdir
    repo.sslcacert = ca_cert_file
    repo.sslclientcert = client_cert_file
    repo.sslclientkey = client_key_file
    # Reset proxy settings, then apply the configured proxy; default to
    # an http:// scheme when none was supplied.
    repo.proxy = None
    repo.proxy_username = None
    repo.proxy_password = None
    proxy = self.proxy_url
    if proxy:
        repo.proxy = proxy if '://' in proxy else 'http://' + proxy
        repo.proxy_username = self.proxy_user
        repo.proxy_password = self.proxy_pass
    if no_mirrors:
        repo.mirrorlist = ""
    # Short, stable id derived from the URL, used for cache paths.
    self.digest = hashlib.sha256(self.url.encode('utf8')).hexdigest()[:16]
    self.dnfbase.repos.add(repo)
    self.repoid = repo.id
    # Same underlying repo object; alias to avoid repeated lookups.
    dnf_repo = self.dnfbase.repos[self.repoid]
    try:
        dnf_repo.load()
        # Don't use mirrors if there are none.
        if not self.clean_urls(dnf_repo._repo.getMirrors()):  # pylint: disable=W0212
            no_mirrors = True
            # Reload repo just in case.
            repo.mirrorlist = ""
            dnf_repo.load()
    except RepoError as exc:
        raise RepoMDError(exc)
    # Do not try to expand baseurl to other mirrors
    if no_mirrors:
        dnf_repo.urls = repo.baseurl
        # Make sure baseurl ends with / and urljoin will work correctly
        if dnf_repo.urls[0][-1] != '/':
            dnf_repo.urls[0] += '/'
    else:
        mirrors = self.clean_urls(dnf_repo._repo.getMirrors())  # pylint: disable=W0212
        # Mirror URLs with query strings break later path handling.
        dnf_repo.urls = [mirror for mirror in mirrors if '?' not in mirror]
    dnf_repo.basecachedir = os.path.join(CACHE_DIR, self.org)
    # dnf has no repoXML attribute; fake an empty one for callers that
    # expect the yum-style interface.
    repoXML = type('', (), {})()
    repoXML.repoData = {}
    dnf_repo.repoXML = repoXML