def start(self, archive, url, basename, size, text):
    """Begin tracking a fetch: record target metadata and start the clock."""
    # Resume support: account for the bytes already present in a partial file.
    if os.path.exists(archive):
        self.exist_size = os.path.getsize(archive)

    self.url, self.basename, self.text = url, basename, text
    self.total_size = size if size else 0
    # Displayed name: the basename with the ".part"-style suffix stripped.
    self.filename = util.remove_suffix(ctx.const.partial_suffix, basename)

    # Clock helpers: now() samples the wall clock, t_diff() reports the
    # elapsed seconds since the transfer started.
    self.now = lambda: time()
    self.t_diff = lambda: self.now() - self.s_time
    self.s_time = self.now()
def start(self, archive, url, basename, size, text):
    """Initialize progress state for a new download."""
    # If a partial download already exists on disk, start accounting from
    # its current size instead of zero.
    if os.path.exists(archive):
        self.exist_size = os.path.getsize(archive)

    self.filename = util.remove_suffix(ctx.const.partial_suffix, basename)
    self.url = url
    self.basename = basename
    self.text = text
    self.total_size = size if size else 0

    # Timing helpers; s_time is captured last so t_diff() measures from
    # the moment setup completed.
    self.now = lambda: time.time()
    self.t_diff = lambda: self.now() - self.s_time
    self.s_time = self.now()
def unpack_dir(self, target_dir):
    """Uncompress a .Z archive and extract its tar payload into target_dir.

    Runs the external `uncompress` tool to produce a plain tar file next to
    the original, then extracts every member while optionally normalizing
    permissions and ownership. The uncompressed tar file is removed after
    extraction.

    Raises RuntimeError if `uncompress` exits non-zero.
    """
    self.file_path = util.remove_suffix(".Z", self.file_path)

    # NOTE(review): file_path is interpolated into a shell command line; if
    # it can ever contain untrusted characters this is a shell-injection
    # risk — confirm callers only pass sanitized paths.
    ret, out, err = util.run_batch(
        "uncompress -cf %s.Z > %s" % (self.file_path, self.file_path))
    if ret != 0:
        raise RuntimeError(
            _("Problem occured while uncompressing %s.Z file") % self.file_path)

    self.tar = tarfile.open(self.file_path)

    oldwd = None
    try:
        # Don't fail if CWD doesn't exist (#6748)
        oldwd = os.getcwd()
    except OSError:
        pass
    os.chdir(target_dir)

    # BUGFIX: the original left the process chdir'ed into target_dir and
    # leaked the tar handle if extraction raised; restore/close in finally.
    try:
        uid = os.getuid()
        gid = os.getgid()
        for tarinfo in self.tar:
            self.tar.extract(tarinfo)

            # tarfile.extract does not honor umask. It must be honored
            # explicitly. See --no-same-permissions option of tar(1),
            # which is the default behaviour.
            #
            # Note: This is no good while installing a pisi package.
            # Thats why this is optional.
            if self.no_same_permissions and not os.path.islink(tarinfo.name):
                os.chmod(tarinfo.name, tarinfo.mode & ~ctx.const.umask)

            if self.no_same_owner:
                if not os.path.islink(tarinfo.name):
                    os.chown(tarinfo.name, uid, gid)
                else:
                    os.lchown(tarinfo.name, uid, gid)

        # Bug #10680 and addition for tarZ files
        os.unlink(self.file_path)
    finally:
        if oldwd:
            try:
                os.chdir(oldwd)  # Bug #6748
            except OSError:
                pass
        self.tar.close()
def extract_install(self, outdir):
    """Unpack the package's install payload into outdir.

    Prefers the compressed install.tar.lzma member when present; otherwise
    falls back to extracting the flat 'install' directory.
    """
    if not self.impl.has_file(ctx.const.install_tar_lzma):
        # Plain layout: files live directly under an 'install' directory.
        self.extract_dir_flat('install', outdir)
        return

    tmp = ctx.config.tmp_dir()
    lzmafile = os.path.join(tmp, ctx.const.install_tar_lzma)
    self.extract_file(ctx.const.install_tar_lzma, tmp)
    archive.ArchiveTar(lzmafile, 'tarlzma', False, False).unpack_dir(outdir)

    # cleanup install.tar.lzma and install.tar after installing
    for leftover in (lzmafile,
                     util.remove_suffix(ctx.const.lzma_suffix, lzmafile)):
        if os.path.exists(leftover):
            os.unlink(leftover)
def index(self, repo_uri, skip_sources=False):
    """Build the repository index by walking repo_uri.

    Collects binary packages, delta packages and component/distribution/
    group metadata (plus source pspec files unless skip_sources), then adds
    every non-obsolete latest package to the index.
    """
    self.repo_dir = repo_uri

    packages = []
    deltas = {}
    for root, dirs, files in os.walk(repo_uri):
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.add_components(os.path.join(root, fn))
            if fn == 'pspec.xml' and not skip_sources:
                self.add_spec(os.path.join(root, fn), repo_uri)
            if fn == 'distribution.xml':
                self.add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.add_groups(os.path.join(root, fn))

    try:
        # Materialize as a list: a bare map object would be exhausted by the
        # first membership test below on Python 3.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        # No distribution.xml was found during the walk.
        obsoletes_list = []

    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            # BUGFIX: remove_suffix takes (suffix, string); the arguments
            # were swapped here, so the -dbginfo suffix was never stripped
            # and debug packages of obsolete names slipped into the index.
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix,
                                          pkg_name)
        if pkg_name not in obsoletes_list:
            ctx.ui.info(_('Adding %s to package index') % pkg)
            self.add_package(pkg, deltas, repo_uri)
def index(self, repo_uri, skip_sources=False):
    """Build the repository index in parallel using a process pool.

    Walks repo_uri collecting packages, deltas and metadata files, then
    indexes sources and binaries via multiprocessing.Pool.map.
    """
    self.repo_dir = repo_uri

    packages = []
    specs = []
    deltas = {}
    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == "components.xml":
                self.components.extend(add_components(os.path.join(root, fn)))
            if fn == "pspec.xml" and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == "distribution.xml":
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == "groups.xml":
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except BaseException:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        # BUGFIX: materialize the map — on Python 3 a map iterator is
        # consumed by the first 'in' test, so every later obsolete check
        # silently succeeded and obsolete packages were indexed.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix,
                                          pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        try:
            # Add binary packages to index using a process pool
            self.packages = pool.map(add_package, latest_packages)
        except BaseException:
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    ctx.ui.info("")
    pool.close()
    pool.join()
def index(self, repo_uri, skip_sources=False):
    """Index the repository at repo_uri using a worker process pool.

    Gathers binary/delta packages and XML metadata in a single walk, then
    fans the add_spec/add_package work out over multiprocessing.Pool.
    """
    self.repo_dir = repo_uri

    packages = []
    specs = []
    deltas = {}
    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.components.extend(
                    add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except BaseException:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        # BUGFIX: wrap in list() — a bare map object is an iterator on
        # Python 3 and is drained by the first membership test, breaking
        # the obsolete filtering for all subsequent packages.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix,
                                          pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        try:
            # Add binary packages to index using a process pool
            self.packages = pool.map(add_package, latest_packages)
        except BaseException:
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    ctx.ui.info("")
    pool.close()
    pool.join()
def index(self, repo_uri, skip_sources=False):
    """Sort loose packages into per-letter directories, then build the index.

    First pass moves any packages sitting at the top of repo_uri into a
    lib4char/first-letter directory layout; second pass walks the tree and
    indexes sources and binaries through a multiprocessing pool, grouping
    binary packages by directory for progress reporting.
    """
    self.repo_dir = repo_uri

    packages = []
    specs = []
    deltas = {}

    pkgs_sorted = False
    # next(os.walk(...))[2]: only the files directly under repo_uri.
    for fn in next(os.walk(repo_uri))[2]:
        if fn.endswith((ctx.const.delta_package_suffix,
                        ctx.const.package_suffix)):
            name, version = util.parse_package_name(fn)
            # Strip sub-package qualifiers so e.g. foo-devel sorts with foo.
            if name.split("-").pop() in ["devel", "32bit", "doc",
                                         "docs", "userspace"]:
                name = name[:-1 - len(name.split("-").pop())]
            # lib* packages bucket on their first four chars (lib + letter),
            # everything else on the first letter.
            pkgpath = os.path.join(
                repo_uri,
                name[0:4].lower()
                if name.startswith("lib") and len(name) > 3
                else name.lower()[0],
                name.lower())
            if not os.path.isdir(pkgpath):
                os.makedirs(pkgpath)
            ctx.ui.info("%-80.80s\r" % (_('Sorting: %s ') % fn),
                        noln=not ctx.config.get_option("verbose"))
            shutil.copy2(os.path.join(repo_uri, fn), pkgpath)
            os.remove(os.path.join(repo_uri, fn))
            pkgs_sorted = True
    if pkgs_sorted:
        ctx.ui.info("%-80.80s\r" % '')

    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.components.extend(
                    add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except BaseException:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix,
                                          pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        # Group packages by their sorting directory so progress can be
        # reported per directory.
        sorted_pkgs = {}
        for pkg in latest_packages:
            # Raw string: '\d'/'\w' inside a plain string are deprecated
            # escapes on modern Python.
            key = re.search(r"\/((lib)?[\d\w])\/", pkg[0])
            key = key.group(1) if key else os.path.dirname(pkg[0])
            sorted_pkgs.setdefault(key, []).append(pkg)

        self.packages = []
        for key, pkgs in sorted(sorted_pkgs.items()):
            # BUGFIX: format AFTER the gettext lookup; formatting inside
            # _() makes the translation key dynamic so it can never match
            # the catalog.
            ctx.ui.info("%-80.80s\r" %
                        (_("Adding packages from directory %s... ") % key),
                        noln=True)
            try:
                # Add binary packages to index using a process pool
                self.packages.extend(pool.map(add_package, pkgs))
            except BaseException:
                pool.terminate()
                pool.join()
                ctx.ui.info("")
                raise
            ctx.ui.info(
                "%-80.80s\r" %
                (_("Adding packages from directory %s... done.") % key))

    ctx.ui.info("")
    pool.close()
    pool.join()
def index(self, repo_uri, skip_sources=False):
    """Sort loose packages into subdirectories, then build the index.

    Like the directory-sorting variant above but delegates path layout to
    util.parse_package_dir_path. Sources and binaries are indexed through
    a multiprocessing pool, with per-directory progress output.
    """
    self.repo_dir = repo_uri

    packages = []
    specs = []
    deltas = {}

    pkgs_sorted = False
    # next(...) instead of the Py2-only .next() method; behavior identical.
    for fn in next(os.walk(repo_uri))[2]:
        if fn.endswith((ctx.const.delta_package_suffix,
                        ctx.const.package_suffix)):
            pkgpath = os.path.join(repo_uri,
                                   util.parse_package_dir_path(fn))
            if not os.path.isdir(pkgpath):
                os.makedirs(pkgpath)
            ctx.ui.info("%-80.80s\r" % (_('Sorting: %s ') % fn),
                        noln=not ctx.config.get_option("verbose"))
            shutil.copy2(os.path.join(repo_uri, fn), pkgpath)
            os.remove(os.path.join(repo_uri, fn))
            pkgs_sorted = True
    if pkgs_sorted:
        ctx.ui.info("%-80.80s\r" % '')

    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.components.extend(
                    add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except BaseException:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        # list() so repeated membership tests work on Python 3 too, where
        # map() returns a one-shot iterator.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix,
                                          pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        # Bucket packages by their sorting directory for progress output.
        sorted_pkgs = {}
        for pkg in latest_packages:
            key = re.search(r"\/((lib)?[\d\w])\/", pkg[0])
            key = key.group(1) if key else os.path.dirname(pkg[0])
            sorted_pkgs.setdefault(key, []).append(pkg)

        self.packages = []
        for key, pkgs in sorted(sorted_pkgs.items()):
            # BUGFIX: apply % after _() — formatting inside the gettext
            # call defeats catalog lookup.
            ctx.ui.info("%-80.80s\r" %
                        (_("Adding packages from directory %s... ") % key),
                        noln=True)
            try:
                # Add binary packages to index using a process pool
                self.packages.extend(pool.map(add_package, pkgs))
            except BaseException:
                pool.terminate()
                pool.join()
                ctx.ui.info("")
                raise
            ctx.ui.info(
                "%-80.80s\r" %
                (_("Adding packages from directory %s... done.") % key))

    ctx.ui.info("")
    pool.close()
    pool.join()