# NOTE: the functions below are excerpted from the inary codebase and rely on
# their enclosing modules' imports: the standard-library modules os, re,
# shutil, zipfile and multiprocessing, plus inary-internal names such as
# ctx (context), util, ui, pgraph, operations, atomicoperations, inary.db.*,
# inary.package, the gettext alias _() and the index helpers add_spec,
# add_package, add_components, add_distro and add_groups.


def __getpackageurl(package):
    packagedb = inary.db.packagedb.PackageDB()
    repodb = inary.db.repodb.RepoDB()

    pkg = util.parse_package_name_get_name(package)
    reponame = None
    try:
        reponame = packagedb.which_repo(pkg)
    except Exception:
        # Maybe this package was obsoleted from the repository
        for repo in repodb.get_binary_repos():
            if pkg in packagedb.get_obsoletes(repo):
                reponame = repo

    if not reponame:
        raise PackageNotFound

    repourl = repodb.get_repo_url(reponame)
    ctx.ui.info(
        _("Package \"{0}\" found in repository \"{1}\".").format(
            pkg, reponame))

    # return _possible_ url for this package
    return os.path.join(os.path.dirname(repourl),
                        util.parse_package_dir_path(package),
                        package)
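
# --- Illustration (not from the original module) ---
# A minimal, self-contained sketch of the URL composition performed by
# __getpackageurl() above: os.path.dirname() strips the index file from the
# repo URL, then the package's sorted subdirectory and file name are joined
# onto the base. The URL, the "f" subdirectory and the package file name are
# hypothetical stand-ins for what repodb.get_repo_url() and
# util.parse_package_dir_path() would return.
import os


def _example_package_url():
    repourl = "https://example.org/repo/inary-index.xml"
    pkg_dir = "f"  # stand-in for util.parse_package_dir_path(package)
    package = "foo-1.0-1-x86_64.inary"
    return os.path.join(os.path.dirname(repourl), pkg_dir, package)
    # -> "https://example.org/repo/f/foo-1.0-1-x86_64.inary"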
def index(self, repo_uri, skip_sources=False):
    self.repo_dir = repo_uri

    packages = []
    specs = []
    deltas = {}

    ctx.ui.info(_("* Generating index tree...\n"), color="cyan")

    pkgs_sorted = False
    for fn in os.walk(repo_uri).__next__()[2]:
        if fn.endswith(ctx.const.delta_package_suffix) or fn.endswith(
                ctx.const.package_suffix):
            pkgpath = os.path.join(repo_uri,
                                   util.parse_package_dir_path(fn))
            if not os.path.isdir(pkgpath):
                os.makedirs(pkgpath)
            ctx.ui.info(
                "{:80.80}\r".format(_(' -> Sorting: \"{}\"').format(fn)),
                noln=False if ctx.config.get_option("verbose") else True)
            shutil.copy2(os.path.join(repo_uri, fn), pkgpath)
            os.remove(os.path.join(repo_uri, fn))
            pkgs_sorted = True

    if pkgs_sorted:
        ctx.ui.info("{:80.80}\r".format(
            util.colorize(_(' * Sorted: \"{}\"').format(fn),
                          color="green")))

    for root, dirs, files in os.walk(repo_uri):
        # Filter hidden directories
        # TODO: Add --exclude-dirs parameter to CLI and filter the
        # directories accordingly here
        dirs[:] = [d for d in dirs if not d.startswith(".")]

        for fn in files:
            if fn.endswith(ctx.const.delta_package_suffix):
                name = util.parse_package_name_get_name(fn)
                deltas.setdefault(name, []).append(os.path.join(root, fn))
            elif fn.endswith(ctx.const.package_suffix):
                packages.append(os.path.join(root, fn))

            if fn == 'components.xml':
                self.components.extend(
                    add_components(os.path.join(root, fn)))
            if fn == 'pspec.xml' and not skip_sources:
                specs.append((os.path.join(root, fn), repo_uri))
            if fn == 'distribution.xml':
                self.distribution = add_distro(os.path.join(root, fn))
            if fn == 'groups.xml':
                self.groups.extend(add_groups(os.path.join(root, fn)))

    ctx.ui.info("")

    try:
        obsoletes_list = list(map(str, self.distribution.obsoletes))
    except AttributeError:
        obsoletes_list = []

    if obsoletes_list:
        ctx.ui.info(_(
            " * Added obsoleted packages: [ {} ]".format(obsoletes_list)),
            color="blue", noln=False)

    # Create a process pool, with as many worker processes as we have CPUs
    pool = multiprocessing.Pool()

    # Before calling pool.map, check whether the list is empty: python#12157
    if specs:
        ctx.ui.info(_(" * Adding source packages: "),
                    color="blue", noln=False)
        try:
            # Add source packages to the index using the process pool
            self.specs = pool.map(add_spec, specs)
            ctx.ui.info("\n")
        except BaseException:
            # If an exception occurs (like a keyboard interrupt),
            # immediately terminate the worker processes and propagate the
            # exception. (The CLI honors KeyboardInterrupt; if you're not
            # using the CLI, you must handle KeyboardInterrupt yourself.)
            pool.terminate()
            pool.join()
            ctx.ui.info("")
            raise

    latest_packages = []

    for pkg in util.filter_latest_packages(packages):
        pkg_name = util.parse_package_name(os.path.basename(pkg))[0]
        if pkg_name.endswith(ctx.const.debug_name_suffix):
            pkg_name = util.remove_suffix(ctx.const.debug_name_suffix,
                                          pkg_name)
        if pkg_name not in obsoletes_list:
            # Currently, multiprocessing.Pool.map only accepts functions
            # taking a single parameter, so we pack our parameters into a
            # tuple to work around that
            latest_packages.append((pkg, deltas, repo_uri))

    # Before calling pool.map, check whether the list is empty: python#12157
    if latest_packages:
        sorted_pkgs = {}
        for pkg in latest_packages:
            key = re.search(r"\/((lib)?[\d\w])\/", pkg[0])
            key = key.group(1) if key else os.path.dirname(pkg[0])
            try:
                sorted_pkgs[key].append(pkg)
            except KeyError:
                sorted_pkgs[key] = [pkg]

        self.packages = []

        ctx.ui.info(_(" * Adding binary packages: "),
                    color="blue", noln=False)
        for key, pkgs in sorted(sorted_pkgs.items()):
            ctx.ui.info("{:80.80}\r".format(
                _(" -> Adding packages from directory \"{}\"...".format(
                    key))), noln=True)
            try:
                # Add binary packages to the index using the process pool
                self.packages.extend(pool.map(add_package, pkgs))
            except BaseException:
                pool.terminate()
                pool.join()
                ctx.ui.info("")
                raise
            ctx.ui.info("{:80.80}\r".format(
                _(" * Adding packages from directory \"{}\"... done.".
                  format(key))), color="green", noln=False)

    ctx.ui.info(_("* Writing index file."), color="blue")

    pool.close()
    pool.join()
def install_pkg_files(package_URIs, reinstall=False):
    """Install a number of inary package files."""

    installdb = inary.db.installdb.InstallDB()
    ctx.ui.debug('A = {}'.format(str(package_URIs)))

    for x in package_URIs:
        if not x.endswith(ctx.const.package_suffix):
            raise Exception(
                _('Mixing file names and package names not supported yet.'))

    # filter packages that are already installed
    tobe_installed, already_installed = [], set()
    if not reinstall:
        for x in package_URIs:
            pkg_name = util.parse_package_name_get_name(os.path.basename(x))
            if installdb.has_package(pkg_name):
                already_installed.add(pkg_name)
            else:
                tobe_installed.append(x)
        if already_installed:
            ctx.ui.warning(
                _("The following package(s) are already installed "
                  "and are not going to be installed again:"))
            ctx.ui.info(util.format_by_columns(sorted(already_installed)))
        package_URIs = tobe_installed

    if ctx.config.get_option('ignore_dependency'):
        # simple code path then
        for x in package_URIs:
            atomicoperations.install_single_file(x, reinstall)
        return True

    # read the package information into memory first,
    # regardless of which distribution they come from
    d_t = {}
    dfn = {}
    for x in package_URIs:
        try:
            package = inary.package.Package(x)
            package.read()
        except zipfile.BadZipfile:
            # YALI needs to know which file is broken
            raise zipfile.BadZipfile(x)
        name = str(package.metadata.package.name)
        d_t[name] = package.metadata.package
        dfn[name] = x

    # check the packages' distribution releases and architectures
    if not ctx.get_option('ignore_check'):
        for x in list(d_t.keys()):
            pkg = d_t[x]
            if pkg.distributionRelease > ctx.config.values.general.distribution_release:
                raise Exception(
                    _('Package \"{0}\" is not compatible with your distribution \'{1}\' release \'{2}\'.'
                      ).format(x,
                               ctx.config.values.general.distribution,
                               ctx.config.values.general.distribution_release))
            if pkg.architecture != ctx.config.values.general.architecture:
                raise Exception(
                    _('Package \"{0}\" (\'{1}\') is not compatible with your \'{2}\' architecture.'
                      ).format(x, pkg.architecture,
                               ctx.config.values.general.architecture))

    def satisfiesDep(dep):
        # is the dependency satisfied among the installed packages
        # or the packages to be installed?
        return dep.satisfied_by_installed() or dep.satisfied_by_dict_repo(d_t)

    # for this case, we have to determine the dependencies
    # that aren't already satisfied and try to install them
    # from the repository
    dep_unsatis = []
    for name in list(d_t.keys()):
        pkg = d_t[name]
        deps = pkg.runtimeDependencies()
        for dep in deps:
            if not satisfiesDep(dep) and dep.package not in [
                    x.package for x in dep_unsatis]:
                dep_unsatis.append(dep)

    # now determine whether these unsatisfied dependencies can
    # be satisfied by installing packages from the repo
    for dep in dep_unsatis:
        if not dep.satisfied_by_repo() and not ctx.config.get_option(
                'ignore_satisfy'):
            raise Exception(
                _('External dependencies not satisfied: \"{}\", \"{}\"').format(dep, name))

    # if so, then invoke install_pkg_names
    extra_packages = [x.package for x in dep_unsatis]
    if extra_packages:
        ctx.ui.warning(
            _("The following packages will be installed "
              "in order to satisfy dependencies:"))
        ctx.ui.info(util.format_by_columns(sorted(extra_packages)))
        if not ctx.ui.confirm(_('Would you like to continue?')):
            raise Exception(_('External dependencies not satisfied.'))
        install_pkg_names(extra_packages, reinstall=False, extra=False)

    class PackageDB:
        @staticmethod
        def get_package(key, repo=None):
            return d_t[str(key)]

    packagedb = PackageDB()

    installdb = inary.db.installdb.InstallDB()

    A = list(d_t.keys())
    if len(A) == 0:
        ctx.ui.info(_('No packages to install.'))
        return

    # try to construct an inary graph of packages to
    # install / reinstall
    G_f = pgraph.PGraph(packagedb, installdb)  # construct G_f
    G_f.reinstall = reinstall

    # find the "install closure" graph of G_f by package
    # set A using packagedb
    for x in A:
        G_f.packages.append(x)
    B = A
    while len(B) > 0:
        Bp = set()
        for x in B:
            pkg = packagedb.get_package(x)
            G_f.add_package(x)
            # for dep in pkg.runtimeDependencies():
            #     G_f.add_package(dep)
        B = Bp

    order = G_f.topological_sort()
    if not ctx.get_option('ignore_package_conflicts'):
        conflicts = operations.helper.check_conflicts(order, packagedb)
        if conflicts:
            operations.remove.remove_conflicting_packages(conflicts)
    order.reverse()
    ctx.ui.info(_('Installation order: ') + util.strlist(order))

    if ctx.get_option('dry_run'):
        return True

    ctx.ui.notify(ui.packagestogo, order=order)

    for x in order:
        atomicoperations.install_single_file(dfn[x], reinstall)

    return True
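
# --- Illustration (not from the original module) ---
# A minimal, self-contained sketch of the frontier loop that
# install_pkg_files() uses to walk the install closure: start from the
# package set A, expand each frontier B through its dependencies into Bp,
# and stop when no new packages appear. (In the function above the
# dependency expansion is commented out, so its loop ends after one pass.)
# The toy dependency table passed in below is hypothetical.
def _install_closure(A, deps):
    closure = set(A)
    B = list(A)
    while len(B) > 0:
        Bp = set()
        for x in B:
            for dep in deps.get(x, []):
                if dep not in closure:
                    closure.add(dep)
                    Bp.add(dep)
        B = list(Bp)  # next frontier
    return closure

# _install_closure(["foo"], {"foo": ["bar"], "bar": ["baz"]})
# -> {'foo', 'bar', 'baz'}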