def index(self, repo_uri, skip_sources=False):
    """Walk `repo_uri` and (re)build the repository index.

    Collects binary packages and delta packages from the tree, registers
    component, source-spec and distribution metadata files as they are
    encountered, then adds the latest version of every non-obsolete
    package to the index.

    Args:
        repo_uri: path of the repository directory tree to walk.
        skip_sources: when True, pspec.xml source specs are not indexed.
    """
    self.repo_dir = repo_uri

    packages = []
    deltas = {}   # package name -> list of delta package paths

    for root, dirs, files in os.walk(repo_uri):
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'component.xml':
                ctx.ui.info(_('Adding %s to component index') % fn)
                self.add_component(os.path.join(root, fn))
            if fn == 'pspec.xml' and not skip_sources:
                self.add_spec(os.path.join(root, fn), repo_uri)
            if fn == 'distribution.xml':
                self.add_distro(os.path.join(root, fn))

    try:
        # FIX: materialize with list(). On Python 3, map() returns a
        # one-shot iterator; the repeated `not in` membership tests in
        # the loop below would exhaust it after the first package, so
        # every later obsolete package would be indexed anyway. This
        # also matches the list(map(...)) form used by the newer index()
        # variant elsewhere in this file.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        # No distribution metadata was seen (or it declares no obsoletes).
        obsoletes_list = []

    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name not in obsoletes_list:
            ctx.ui.info(_('Adding %s to package index') % pkg)
            self.add_package(pkg, deltas, repo_uri)
def index(self, repo_uri, skip_sources=False):
    """Walk `repo_uri` and (re)build the repository index.

    Collects binary and delta packages from the tree, registers
    components.xml, pspec.xml, distribution.xml and groups.xml metadata
    as encountered, then adds the latest version of every non-obsolete
    package to the index.

    Args:
        repo_uri: path of the repository directory tree to walk.
        skip_sources: when True, pspec.xml source specs are not indexed.
    """
    self.repo_dir = repo_uri

    packages = []
    deltas = {}   # package name -> list of delta package paths

    for root, dirs, files in os.walk(repo_uri):
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == "components.xml":
                self.add_components(os.path.join(root, fn))
            if fn == "pspec.xml" and not skip_sources:
                self.add_spec(os.path.join(root, fn), repo_uri)
            if fn == "distribution.xml":
                self.add_distro(os.path.join(root, fn))
            if fn == "groups.xml":
                self.add_groups(os.path.join(root, fn))

    try:
        # FIX: materialize with list(). On Python 3, map() returns a
        # one-shot iterator; the repeated `not in` tests in the loop
        # below would exhaust it after the first package, letting later
        # obsolete packages slip into the index.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        # No distribution metadata was seen (or it declares no obsoletes).
        obsoletes_list = []

    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name not in obsoletes_list:
            ctx.ui.info(_("Adding %s to package index") % pkg)
            self.add_package(pkg, deltas, repo_uri)
def getPackageLists(pkgList):
    """Split package file names into (oldVersions, latestVersions).

    Keeps, per package name, the entry with the highest build number
    (when both candidates carry one) or otherwise the highest release.
    Entries that cannot be parsed are silently skipped.
    """
    newest = {}  # name -> (full_version, release, build)
    for entry in pkgList:
        try:
            pkg, full_version = util.parse_package_name(entry)
            _ver, rel, bld = pisi.util.split_version(full_version)
            rel = int(rel)
            if bld:
                bld = int(bld)
            if pkg in newest:
                _cur_ver, cur_rel, cur_bld = newest[pkg]
                if cur_bld and bld:
                    # Both sides have build numbers: compare those.
                    if cur_bld > bld:
                        continue
                elif cur_rel > rel:
                    # Fall back to release comparison.
                    continue
            newest[pkg] = full_version, rel, bld
        except:
            # Unparseable entry — leave it for the oldVersions bucket.
            pass
    latestVersions = ["%s-%s" % (pkg, info[0]) for pkg, info in newest.items()]
    oldVersions = list(set(pkgList) - set(latestVersions))
    return oldVersions, latestVersions
def minsandmaxes():
    """Scan *.pisi files in the current directory (excluding deltas).

    Returns a pair of dicts: (name -> lowest Version, name -> highest
    Version) over every full package file found.
    """
    full = set(glob.glob("*.pisi"))
    delta = set(glob.glob("*.delta.pisi"))
    versions = {}
    for fname in full - delta:
        stem = os.path.basename(fname).split(".pisi")[0]
        name, version = util.parse_package_name(stem)
        versions.setdefault(name, []).append(Version(version))
    mins = {pkg: min(vers) for pkg, vers in versions.items()}
    maxs = {pkg: max(vers) for pkg, vers in versions.items()}
    return mins, maxs
def minsandmaxes():
    """Scan *.eopkg files in the current directory (excluding deltas).

    Returns a pair of dicts: (name -> lowest Version, name -> highest
    Version) over every full package file found.
    """
    candidates = set(glob.glob("*.eopkg")) - set(glob.glob("*.delta.eopkg"))
    versions = {}
    for path in candidates:
        stem = os.path.basename(path).split(".eopkg")[0]
        name, version = util.parse_package_name(stem)
        versions.setdefault(name, []).append(Version(version))
    mins = {}
    maxs = {}
    for pkg in versions.keys():
        mins[pkg] = min(versions[pkg])
        maxs[pkg] = max(versions[pkg])
    return mins, maxs
def clean_duplicates():
    """Remove stale duplicate entries from the installed-package metadata.

    Scans <lib_dir>/package; whenever a package name appears with more
    than one version, every version but the newest is deleted with
    pisi.util.clean_dir.
    """
    pkg_root = pisi.util.join_path(pisi.api.ctx.config.lib_dir(), 'package')
    installed = {}  # name -> newest version string seen so far
    stale = []      # "<name>-<version>" directory names to delete
    for entry in os.listdir(pkg_root):
        (name, ver) = util.parse_package_name(entry)
        if name not in installed:
            installed[name] = ver
        elif Version(ver) > Version(installed[name]):
            # Newer version found: the previously recorded one is stale.
            stale.append(name + '-' + installed[name])
            installed[name] = ver
        else:
            # Older than what we already recorded: this one is stale.
            stale.append(name + '-' + ver)
    for entry in stale:
        pisi.util.clean_dir(pisi.util.join_path(pkg_root, entry))
def findUnneededFiles():
    """Return cached .pisi file names that are NOT the newest version.

    Lists /var/cache/pisi/packages/*.pisi, determines the newest version
    per package name, and returns every other (superseded) file name.
    """
    cached = sorted(os.path.basename(p)
                    for p in glob.glob("/var/cache/pisi/packages/*.pisi"))
    newest = {}  # name -> highest version string seen
    for fname in cached:
        name, ver = util.parse_package_name(fname)
        version = ver.split(".pisi")[0]  # strip the file suffix from the version part
        if name not in newest or Version(newest[name]) < Version(version):
            newest[name] = version
    # Drop the newest file of each package; what remains is unneeded.
    for name in newest:
        cached.remove("%s-%s.pisi" % (name, newest[name]))
    return cached
def findUnneededFiles(listdir):
    """Remove the newest entry of each package from `listdir` (in place).

    Returns the same list, now containing only superseded entries.
    Entries that cannot be parsed are left untouched.
    """
    newest = {}  # name -> highest version string seen
    for entry in listdir:
        try:
            name, version = util.parse_package_name(entry)
            if name in newest:
                if Version(newest[name]) < Version(version):
                    newest[name] = version
            elif version:
                newest[name] = version
        except:
            # Unparseable entry — keep it in the result list.
            pass
    for name, version in newest.items():
        listdir.remove("%s-%s" % (name, version))
    return listdir
def getPackageLists(pkgList):
    """Split package file names into (oldVersions, latestVersions).

    The newest entry per package name (by Version comparison) goes into
    latestVersions; everything else, including unparseable entries, ends
    up in oldVersions.
    """
    newest = {}  # name -> highest version string seen
    for entry in pkgList:
        try:
            name, version = util.parse_package_name(entry)
            if name in newest:
                if Version(newest[name]) < Version(version):
                    newest[name] = version
            elif version:
                newest[name] = version
        except:
            # Unparseable entry — leave it for the oldVersions bucket.
            pass
    latestVersions = ["%s-%s" % (name, ver) for name, ver in newest.items()]
    oldVersions = list(set(pkgList) - set(latestVersions))
    return oldVersions, latestVersions
def fetch_remote_file(self, url):
    """Download the package at `url` into the package cache directory.

    Sets self.filepath to the cache destination. If a file with a
    matching hash (per the package database) is already cached, the
    download is skipped and a '[cached]' notice is printed.
    """
    from fetcher import fetch_url
    dest = ctx.config.packages_dir()
    self.filepath = join(dest, url.filename())
    # Hash the existing cached file, if any, so we can compare it with
    # the hash recorded in the package database.
    sha1sum = None
    if exists(self.filepath):
        sha1sum = util.sha1_file(self.filepath)
    name, version = util.parse_package_name(basename(self.filepath))
    if sha1sum != ctx.packagedb.get_package(name).packageHash:
        try:
            fetch_url(url, dest, ctx.ui.Progress)
        except pisi.fetcher.FetchError:
            # Bug 3465: during a reinstall the remote file may have been
            # replaced by a newer release, so tell the user to upgrade
            # instead; otherwise re-raise the original fetch error.
            if ctx.get_option('reinstall'):
                raise Error(_("There was a problem while fetching '%s'.\nThe package "
                              "may have been upgraded. Please try to upgrade the package.") % url);
            raise
    else:
        # Cached copy matches the recorded hash — no download needed.
        ctx.ui.info(_('%s [cached]') % url.filename())
def getPackageLists(pkgList):
    """Split package file names into (oldVersions, latestVersions).

    The newest entry per package name (highest integer release) goes
    into latestVersions; everything else, including unparseable entries,
    ends up in oldVersions.
    """
    newest = {}  # name -> (full_version, release)
    for entry in pkgList:
        try:
            name, full_version = util.parse_package_name(entry)
            _ver, release, _build = pisi.util.split_version(full_version)
            release = int(release)
            if name in newest and newest[name][1] > release:
                # A higher release is already recorded; skip this entry.
                continue
            newest[name] = full_version, release
        except:
            # Unparseable entry — leave it for the oldVersions bucket.
            pass
    latestVersions = ["%s-%s" % (name, info[0]) for name, info in newest.items()]
    oldVersions = list(set(pkgList) - set(latestVersions))
    return oldVersions, latestVersions
def index(self, repo_uri, skip_sources=False):
    """Build the repository index from `repo_uri` using a process pool.

    First pass: any package files sitting directly in the repo root are
    moved into a sorted sub-directory layout. Second pass: walk the tree
    collecting packages, deltas and metadata files. Source specs and
    binary packages are then indexed in parallel via multiprocessing.

    Args:
        repo_uri: path of the repository directory tree.
        skip_sources: when True, pspec.xml source specs are not indexed.
    """
    self.repo_dir = repo_uri
    packages = []
    specs = []
    deltas = {}
    pkgs_sorted = False
    # Look only at files directly under repo_uri (first os.walk yield).
    for fn in next(os.walk(repo_uri))[2]:
        if fn.endswith(ctx.const.delta_package_suffix) or fn.endswith(ctx.const.package_suffix):
            name, version = util.parse_package_name(fn)
            # Strip well-known sub-package suffixes so e.g. foo-devel is
            # filed under foo's directory.
            if name.split("-").pop() in ["devel", "32bit", "doc", "docs", "userspace"]:
                name = name[:-1 - len(name.split("-").pop())]
            # Destination: <repo>/<first letter or "libX" prefix>/<name>.
            pkgpath = os.path.join(repo_uri,
                                   name[0:4].lower() if name.startswith("lib") and len(name) > 3 else name.lower()[0],
                                   name.lower())
            if not os.path.isdir(pkgpath):
                os.makedirs(pkgpath)
            ctx.ui.info("%-80.80s\r" % (_('Sorting: %s ') % fn),
                        noln=False if ctx.config.get_option("verbose") else True)
            shutil.copy2(os.path.join(repo_uri, fn), pkgpath)
            os.remove(os.path.join(repo_uri, fn))
            pkgs_sorted = True
    if pkgs_sorted:
        # Clear the progress line left by the sorting messages.
        ctx.ui.info("%-80.80s\r" % '')

    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.components.extend(add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        # No distribution.xml seen (or it declares no obsoletes).
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        # Debug packages are filtered by their parent package's name.
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix, pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        # Group packages by their letter sub-directory so progress can be
        # reported per directory.
        sorted_pkgs = {}
        for pkg in latest_packages:
            key = re.search("\/((lib)?[\d\w])\/", pkg[0])
            key = key.group(1) if key else os.path.dirname(pkg[0])
            try:
                sorted_pkgs[key].append(pkg)
            except KeyError:
                sorted_pkgs[key] = [pkg]
        self.packages = []
        for key, pkgs in sorted(sorted_pkgs.items()):
            ctx.ui.info("%-80.80s\r" % (_("Adding packages from directory %s... " % key)), noln=True)
            try:
                # Add binary packages to index using a process pool
                self.packages.extend(pool.map(add_package, pkgs))
            except:
                pool.terminate()
                pool.join()
                ctx.ui.info("")
                raise
            ctx.ui.info("%-80.80s\r" % (_("Adding packages from directory %s... done." % key)))
    ctx.ui.info("")
    pool.close()
    pool.join()
# NOTE(review): fragment — this chunk begins mid-function (the body of a
# minsandmaxes() variant without its `def` header; the bare `return` is a
# SyntaxError at module level) and is fused with a Python-2 __main__ driver
# (print statements) that builds delta packages between each package's min
# and max versions. Left verbatim: the original line layout (including the
# newlines inside printed strings) was lost in flattening and cannot be
# reconstructed safely. Confirm against the full original file.
packages = map(lambda x: os.path.basename(x).split(".eopkg")[0], set(glob.glob("*.eopkg")) - set(glob.glob("*.delta.eopkg"))) versions = {} for file in packages: name, version = util.parse_package_name(file) versions.setdefault(name, []).append(Version(version)) mins = {} maxs = {} for pkg in versions.keys(): mins[pkg] = min(versions[pkg]) maxs[pkg] = max(versions[pkg]) return mins, maxs if __name__ == "__main__": mi, ma = minsandmaxes() for pkg in mi.keys(): old_pkg = "%s-%s.eopkg" % (pkg, str(mi[pkg])) new_pkg = "%s-%s.eopkg" % (pkg, str(ma[pkg])) name, version = util.parse_package_name(pkg) if not old_pkg == new_pkg: # skip if same if not os.path.exists("%s-%s-%s.delta.eopkg" % (name, str(mi[pkg].build), str(ma[pkg].build))): # skip if delta exists print "%s --> Min: %s Max: %s \n %s-%s-%s.delta.eopkg" % (pkg, old_pkg, new_pkg, name, str(mi[pkg].build), str(ma[pkg].build)) create_delta_package(old_pkg, new_pkg)
# NOTE(review): fragment — starts with a `return` whose enclosing function
# (presumably a getPackages() definition) is not visible in this chunk, then
# a Python-2 script (print statements) that reports binary .pisi packages in
# repBIN with no corresponding source package in repSRC. Left verbatim: the
# original line layout (including newlines inside the triple-quoted usage
# string) was lost in flattening and cannot be reconstructed safely.
return packages def usage(miniMe): print """Usage: %s srcREP binREP (ex: %s /home/bahadir/repos/pardus/devel/kernel /home/bahadir/binary) """ % (miniMe, miniMe) sys.exit(1) if __name__ == "__main__": try: repSRC = sys.argv[1] except IndexError: usage(sys.argv[0]) try: repBIN = sys.argv[2] except IndexError: usage(sys.argv[0]) packages = getPackages(findPspec(repSRC)) binaries = {} for f in filter(lambda x: x.endswith(".pisi"), os.listdir(repBIN)): binaries[parse_package_name(f)[0]] = f print "Residuary binary packages:" for b in binaries: if b not in packages: print " %s" % binaries[b]
# NOTE(review): fragment — begins mid-body of a minsandmaxes() variant (its
# `def` header and the `packages` loop source are outside this chunk; the
# bare `return` is a SyntaxError at module level), fused with a Python-2
# __main__ driver that creates delta packages between each package's min and
# max builds. Left verbatim: the original line layout (including newlines in
# printed strings) was lost in flattening and cannot be reconstructed safely.
name, version = util.parse_package_name(file) versions.setdefault(name, []).append(Version(version)) mins = {} maxs = {} for pkg in versions.keys(): mins[pkg] = min(versions[pkg]) maxs[pkg] = max(versions[pkg]) return mins, maxs if __name__ == "__main__": mi, ma = minsandmaxes() for pkg in mi.keys(): old_pkg = "%s-%s.eopkg" % (pkg, str(mi[pkg])) new_pkg = "%s-%s.eopkg" % (pkg, str(ma[pkg])) name, version = util.parse_package_name(pkg) if not old_pkg == new_pkg: # skip if same if not os.path.exists( "%s-%s-%s.delta.eopkg" % (name, str(mi[pkg].build), str(ma[pkg].build))): # skip if delta exists print "%s --> Min: %s Max: %s \n %s-%s-%s.delta.eopkg" % ( pkg, old_pkg, new_pkg, name, str( mi[pkg].build), str(ma[pkg].build)) create_delta_package(old_pkg, new_pkg)
def index(self, repo_uri, skip_sources=False):
    """Build the repository index from `repo_uri` using a process pool.

    Walks the tree collecting binary packages, delta packages and the
    metadata files (components.xml, pspec.xml, distribution.xml,
    groups.xml), then indexes source specs and the latest non-obsolete
    binary packages in parallel via multiprocessing.

    Args:
        repo_uri: path of the repository directory tree.
        skip_sources: when True, pspec.xml source specs are not indexed.
    """
    self.repo_dir = repo_uri
    packages = []
    specs = []
    deltas = {}
    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == "components.xml":
                self.components.extend(add_components(os.path.join(root, fn)))
            if fn == "pspec.xml" and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == "distribution.xml":
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == "groups.xml":
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        # FIX: materialize with list(). On Python 3, map() returns a
        # one-shot iterator; the repeated `not in` tests below would
        # exhaust it after the first package, letting later obsolete
        # packages slip into the index.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        # Debug packages are filtered by their parent package's name.
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix, pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        try:
            # Add binary packages to index using a process pool
            self.packages = pool.map(add_package, latest_packages)
        except:
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise
    ctx.ui.info("")
    pool.close()
    pool.join()
def index(self, repo_uri, skip_sources=False):
    """Build the repository index from `repo_uri` using a process pool.

    Walks the tree collecting binary packages, delta packages and the
    metadata files (components.xml, pspec.xml, distribution.xml,
    groups.xml), then indexes source specs and the latest non-obsolete
    binary packages in parallel via multiprocessing.

    Args:
        repo_uri: path of the repository directory tree.
        skip_sources: when True, pspec.xml source specs are not indexed.
    """
    self.repo_dir = repo_uri
    packages = []
    specs = []
    deltas = {}
    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.components.extend(
                    add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        # FIX: materialize with list(). On Python 3, map() returns a
        # one-shot iterator; the repeated `not in` tests below would
        # exhaust it after the first package, letting later obsolete
        # packages slip into the index.
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        # Debug packages are filtered by their parent package's name.
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix, pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        try:
            # Add binary packages to index using a process pool
            self.packages = pool.map(add_package, latest_packages)
        except:
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise
    ctx.ui.info("")
    pool.close()
    pool.join()
def index(self, repo_uri, skip_sources=False):
    """Build the repository index from `repo_uri` using a process pool.

    Python 2 variant (uses os.walk(...).next() and a list-returning
    map()). First pass: package files sitting directly in the repo root
    are moved into a sorted sub-directory layout. Second pass: walk the
    tree collecting packages, deltas and metadata, then index source
    specs and the latest non-obsolete binary packages in parallel.

    Args:
        repo_uri: path of the repository directory tree.
        skip_sources: when True, pspec.xml source specs are not indexed.
    """
    self.repo_dir = repo_uri
    packages = []
    specs = []
    deltas = {}
    pkgs_sorted = False
    # Look only at files directly under repo_uri (first os.walk yield;
    # .next() is Python 2 only).
    for fn in os.walk(repo_uri).next()[2]:
        if fn.endswith(ctx.const.delta_package_suffix) or fn.endswith(ctx.const.package_suffix):
            pkgpath = os.path.join(repo_uri, util.parse_package_dir_path(fn))
            if not os.path.isdir(pkgpath):
                os.makedirs(pkgpath)
            ctx.ui.info("%-80.80s\r" % (_('Sorting: %s ') % fn),
                        noln = False if ctx.config.get_option("verbose") else True)
            shutil.copy2(os.path.join(repo_uri, fn), pkgpath)
            os.remove(os.path.join(repo_uri, fn))
            pkgs_sorted = True
    if pkgs_sorted:
        # Clear the progress line left by the sorting messages.
        ctx.ui.info("%-80.80s\r" % '')

    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter according
        # directories here
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name, version = util.parse_package_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))
            if fn == 'components.xml':
                self.components.extend(add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    # Create a process pool, as many processes as the number of CPUs we
    # have
    pool = multiprocessing.Pool()

    # Before calling pool.map check if list is empty or not: python#12157
    if specs:
        try:
            # Add source packages to index using a process pool
            self.specs = pool.map(add_spec, specs)
        except:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate worker processes and propagate
            # exception. (CLI honors KeyboardInterrupt exception, if you're
            # not using CLI, you must handle KeyboardException yourself)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    try:
        # On Python 2, map() returns a list, so the repeated membership
        # tests below are safe here.
        obsoletes_list = map(str, self.distribution.obsoletes)
    except AttributeError:
        # No distribution.xml seen (or it declares no obsoletes).
        obsoletes_list = []

    latest_packages = []
    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        # Debug packages are filtered by their parent package's name.
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix, pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map method accepts methods
            # with single parameters only. So we have to send our
            # parameters as a tuple to workaround that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map check if list is empty or not: python#12157
    if latest_packages:
        # Group packages by their letter sub-directory so progress can be
        # reported per directory.
        sorted_pkgs = {}
        for pkg in latest_packages:
            key = re.search("\/((lib)?[\d\w])\/", pkg[0])
            key = key.group(1) if key else os.path.dirname(pkg[0])
            try:
                sorted_pkgs[key].append(pkg)
            except KeyError:
                sorted_pkgs[key] = [pkg]
        self.packages = []
        for key, pkgs in sorted(sorted_pkgs.items()):
            ctx.ui.info("%-80.80s\r" % (_("Adding packages from directory %s... " % key)), noln=True)
            try:
                # Add binary packages to index using a process pool
                self.packages.extend(pool.map(add_package, pkgs))
            except:
                pool.terminate()
                pool.join()
                ctx.ui.info("")
                raise
            ctx.ui.info("%-80.80s\r" % (_("Adding packages from directory %s... done." % key)))
    ctx.ui.info("")
    pool.close()
    pool.join()
# NOTE(review): Python-2 script (print statements) — usage() plus a __main__
# driver that lists binary .pisi packages in repBIN with no corresponding
# source package in repSRC. Left verbatim: the original line layout
# (including newlines inside the triple-quoted usage string) was lost in
# flattening and cannot be reconstructed without changing the runtime
# string. Confirm against the full original file.
def usage(miniMe): print """Usage: %s srcREP binREP (ex: %s /home/bahadir/repos/pardus/devel/kernel /home/bahadir/binary) """ % (miniMe, miniMe) sys.exit(1) if __name__ == "__main__": try: repSRC = sys.argv[1] except IndexError: usage(sys.argv[0]) try: repBIN = sys.argv[2] except IndexError: usage(sys.argv[0]) packages = getPackages(findPspec(repSRC)) binaries = {} for f in filter(lambda x: x.endswith(".pisi"), os.listdir(repBIN)): binaries[parse_package_name(f)[0]] = f print "Residuary binary packages:" for b in binaries: if b not in packages: print " %s" % binaries[b]