def plan_install_pkg_names(A, reinstall=False):
    """Plan an installation order for the package names in *A*.

    Builds the "install closure" dependency graph of *A*, pulling in any
    companion packages (doc/info/devel/dbginfo/static) enabled in the
    configuration, and returns the packages in installation order
    (dependencies first).

    :param A: iterable of package names to install.
    :param reinstall: when True, flag the graph so already installed
        packages are reinstalled.
    :return: list of package names, dependencies first.
    """
    packagedb = inary.db.packagedb.PackageDB()
    installdb = inary.db.installdb.InstallDB()

    # try to construct a inary graph of packages to install / reinstall
    G_f = pgraph.PGraph(packagedb, installdb)  # construct G_f
    G_f.reinstall = reinstall

    # find the "install closure" graph of G_f by package set A using packagedb
    for x in A:
        G_f.add_package(x)

    # (enabled-flag, companion suffix) pairs — the original repeated the
    # same has_package/add_package block once per suffix; fold into data.
    companion_suffixes = (
        (ctx.config.values.general.allow_docs, ctx.const.doc_package_end),
        (ctx.config.values.general.allow_pages, ctx.const.info_package_end),
        (ctx.config.values.general.allow_devel, ctx.const.devel_package_end),
        (ctx.config.values.general.allow_dbginfo, ctx.const.debug_name_suffix),
        (ctx.config.values.general.allow_static, ctx.const.static_name_suffix),
    )

    B = A
    while len(B) > 0:
        Bp = set()
        for x in B:
            G_f.add_package(x)
            for enabled, suffix in companion_suffixes:
                if enabled:
                    dep = x + suffix
                    if packagedb.has_package(dep):
                        Bp.add(dep)
                        G_f.add_package(dep)
        B = Bp

    # NOTE(review): not_satisfied is never populated anywhere in this
    # function, so this branch is dead code — kept for parity with the
    # sibling planners; confirm before deleting outright.
    not_satisfied = dict()
    if not_satisfied:
        msg = _("Following packages are not satisfied:\n")
        for ns in not_satisfied:
            msg += (_(
                ' -> \"{0}\" dependency(s) of package \"{1}\" is not satisfied.\n'
            ).format(not_satisfied[ns], ns))
        raise Exception(msg)

    order = G_f.topological_sort()
    order.reverse()
    return order
def plan_remove(A):
    """Compute the removal closure of package set *A*.

    Walks installed reverse dependencies to find every package that would
    be broken by removing *A*, adds them to the graph, and returns the
    graph together with a topologically sorted removal order.
    """
    installdb = inary.db.installdb.InstallDB()
    packagedb = inary.db.packagedb.PackageDB()

    # construct G_f, the (install closure) graph of package set A
    G_f = pgraph.PGraph(installdb)
    for pkg_name in A:
        G_f.add_package(pkg_name)

    frontier = A
    while frontier:
        next_frontier = set()
        for pkg_name in frontier:
            for rev_dep, dep_info in installdb.get_rev_deps(pkg_name):
                # we don't deal with uninstalled rev deps
                # and unsatisfied dependencies (this is important, too)
                # satisfied_by_any_installed_other_than is for AnyDependency
                if (installdb.has_package(rev_dep)
                        and dep_info.satisfied_by_installed()
                        and not dep_info.satisfied_by_any_installed_other_than(pkg_name)):
                    if rev_dep not in G_f.vertices():
                        next_frontier.add(rev_dep)
                        G_f.add_plain_dep(rev_dep, pkg_name)
            # IDEA: Optimize
            general = ctx.config.values.general
            if general.allow_docs:
                companion = pkg_name + ctx.const.doc_package_end
                if packagedb.has_package(companion):
                    next_frontier.add(companion)
            if general.allow_pages:
                companion = pkg_name + ctx.const.info_package_end
                if packagedb.has_package(companion):
                    next_frontier.add(companion)
            if general.allow_dbginfo:
                companion = pkg_name + ctx.const.debug_name_suffix
                if packagedb.has_package(companion):
                    next_frontier.add(companion)
            if general.allow_static:
                companion = pkg_name + ctx.const.static_name_suffix
                if packagedb.has_package(companion):
                    next_frontier.add(companion)
        frontier = next_frontier

    if ctx.config.get_option('debug'):
        G_f.write_graphviz(sys.stdout)
    order = G_f.topological_sort()
    return G_f, order
def plan_install_pkg_names(A):
    """Plan the installation order for the package names in *A*.

    Resolves the runtime-dependency closure of *A* against the package
    DB — pulling in enabled companion packages (doc/info/dbginfo/static)
    along the way — and returns the dependency graph plus the
    installation order (dependencies first).

    :param A: iterable of package names to install.
    :raises Exception: when a runtime dependency cannot be satisfied
        from the repository.
    :return: (G_f, order) tuple.
    """
    ctx.ui.info(_('Checking dependencies for install...'))
    packagedb = inary.db.packagedb.PackageDB()

    # try to construct a inary graph of packages to install / reinstall
    G_f = pgraph.PGraph(packagedb)  # construct G_f

    # find the "install closure" graph of G_f by package set A using packagedb
    for x in A:
        G_f.add_package(x)

    # (enabled-flag, companion suffix) pairs — folds the four duplicated
    # per-suffix blocks of the original into data.
    companion_suffixes = (
        (ctx.config.values.general.allow_docs, ctx.const.doc_package_end),
        (ctx.config.values.general.allow_pages, ctx.const.info_package_end),
        (ctx.config.values.general.allow_dbginfo, ctx.const.debug_name_suffix),
        (ctx.config.values.general.allow_static, ctx.const.static_name_suffix),
    )

    B = A
    while len(B) > 0:
        Bp = set()
        for x in B:
            pkg = packagedb.get_package(x)
            for dep in pkg.runtimeDependencies():
                ctx.ui.info(_(' -> checking {}').format(str(dep)), verbose=True)
                # we don't deal with already *satisfied* dependencies
                if not dep.satisfied_by_installed():
                    if not dep.satisfied_by_repo(packagedb=packagedb):
                        raise Exception(
                            _('\"{0}\" dependency of package \"{1}\" is not satisfied.').format(
                                dep, pkg.name))
                    if dep.package not in G_f.vertices():
                        Bp.add(str(dep.package))
                        G_f.add_dep(x, dep)
            for enabled, suffix in companion_suffixes:
                if enabled:
                    companion = x + suffix
                    if packagedb.has_package(companion):
                        Bp.add(companion)
                        G_f.add_package(companion)
        B = Bp

    if ctx.config.get_option('debug'):
        G_f.write_graphviz(sys.stdout)
    order = G_f.topological_sort()
    order.reverse()
    return G_f, order
def plan_remove(A):
    """Compute the removal order for package set *A*.

    Seeds a reverse-dependency graph with *A* plus any configured
    companion packages (doc/info/dbginfo/static) and returns the
    topologically sorted removal order.
    """
    installdb = inary.db.installdb.InstallDB()
    packagedb = inary.db.packagedb.PackageDB()

    # construct G_f, the (install closure) graph of package set A
    G_f = pgraph.PGraph(packagedb, installdb)
    for name in A:
        G_f.add_package_revdep(name)

    frontier = A
    while frontier:
        next_frontier = set()
        for name in frontier:
            G_f.add_package_revdep(name)
            general = ctx.config.values.general
            for enabled, suffix in (
                    (general.allow_docs, ctx.const.doc_package_end),
                    (general.allow_pages, ctx.const.info_package_end),
                    (general.allow_dbginfo, ctx.const.debug_name_suffix),
                    (general.allow_static, ctx.const.static_name_suffix)):
                if enabled:
                    companion = name + suffix
                    if packagedb.has_package(companion):
                        next_frontier.add(companion)
        frontier = next_frontier

    return G_f.topological_sort()
def plan_upgrade(A, force_replaced=True, replaces=None):
    """Plan an upgrade for the package names in *A*.

    Collects the closure of packages that must be upgraded together —
    runtime dependencies satisfiable from the repository, conflicts
    resolvable by upgrading, broken reverse dependencies, and reverse
    dependencies flagged by "reverseDependencyUpdate" actions — into a
    dependency graph.

    :param A: iterable of package names to upgrade.
    :param force_replaced: also upgrade replacement targets of replaced
        packages so obsoleted packages are not simply removed.
    :param replaces: optional precomputed replaces mapping; fetched from
        the package DB when None.
    :raises Exception: when a runtime dependency of an upgraded package
        cannot be satisfied from the repository.
    :return: (G_f, order) where order lists packages dependencies-first.
    """
    # FIXME: remove force_replaced
    packagedb = inary.db.packagedb.PackageDB()
    G_f = pgraph.PGraph(packagedb)  # construct G_f

    A = set(A)

    # Force upgrading of installed but replaced packages or else they will
    # be removed (they are obsoleted also). This is not wanted for a
    # replaced driver package (eg. nvidia-X).
    #
    # FIXME: this is also not nice. this would not be needed if replaced
    # packages are not written as obsoleted also. But if they are not
    # written obsoleted "inary index" indexes them
    if force_replaced:
        if replaces is None:
            replaces = packagedb.get_replaces()
        A |= set(inary.util.flatten_list(list(replaces.values())))

    # find the "install closure" graph of G_f by package set A using packagedb
    for x in A:
        G_f.add_package(x)

    installdb = inary.db.installdb.InstallDB()

    def add_runtime_deps(pkg, Bp):
        # Pull in runtime dependencies not satisfied by the installed
        # system but satisfiable from the repository.
        for dep in pkg.runtimeDependencies():
            # add packages that can be upgraded
            if installdb.has_package(dep.package) and dep.satisfied_by_installed():
                continue

            if dep.satisfied_by_repo():
                if dep.package not in G_f.vertices():
                    Bp.add(str(dep.package))

                # Always add the dependency info although the dependant
                # package is already a member of this graph. Upgrade order
                # might change if the dependency info differs from the
                # previous ones.
                G_f.add_dep(pkg.name, dep)
            else:
                ctx.ui.error(
                    _('Dependency \"{0}\" of \"{1}\" cannot be satisfied.').
                    format(dep, pkg.name))
                raise Exception(_("Upgrade is not possible."))

    def add_resolvable_conflicts(pkg, Bp):
        """Try to resolve conflicts by upgrading

        If a package B conflicts with an old version of package A and
        does not conflict with the new version of A, add A to the
        upgrade list.
        """
        for conflict in pkg.conflicts:
            if conflict.package in G_f.vertices():
                # Conflicting package is already in the upgrade list.
                continue
            if not inary.analyzer.conflict.installed_package_conflicts(
                    conflict):
                # Conflicting package is not installed.
                # No need to deal with it.
                continue
            if not packagedb.has_package(conflict.package):
                # Conflicting package is not available in repo.
                # Installed package will be removed.
                continue
            new_pkg = packagedb.get_package(conflict.package)
            if conflict.satisfies_relation(new_pkg.version, new_pkg.release):
                # Package still conflicts with the repo package.
                # Installed package will be removed.
                continue
            # Upgrading the package will resolve conflict.
            # Add it to the upgrade list.
            Bp.add(conflict.package)
            G_f.add_package(conflict.package)

    def add_broken_revdeps(pkg, Bp):
        # Search reverse dependencies to see if anything should be upgraded.
        rev_deps = installdb.get_rev_deps(pkg.name)
        for rev_dep, depinfo in rev_deps:
            # add only installed but unsatisfied reverse dependencies
            if rev_dep in G_f.vertices() or depinfo.satisfied_by_repo():
                continue
            if is_upgradable(rev_dep):
                Bp.add(rev_dep)
                G_f.add_plain_dep(rev_dep, pkg.name)

    def add_needed_revdeps(pkg, Bp):
        # Search for reverse dependency update needs of to-be-upgraded
        # packages; check only the installed ones.
        _version, release, _build = installdb.get_version(pkg.name)  # only release is used
        actions = pkg.get_update_actions(release)
        packages = actions.get("reverseDependencyUpdate")
        if packages:
            for target_package in packages:
                for name, _dep in installdb.get_rev_deps(target_package):
                    if name in G_f.vertices() or not is_upgradable(name):
                        continue
                    Bp.add(name)
                    G_f.add_plain_dep(name, target_package)

    while A:
        Bp = set()
        for x in A:
            pkg = packagedb.get_package(x)
            add_runtime_deps(pkg, Bp)
            add_resolvable_conflicts(pkg, Bp)
            if installdb.has_package(x):
                add_broken_revdeps(pkg, Bp)
                add_needed_revdeps(pkg, Bp)
        A = Bp

    if ctx.config.get_option('debug'):
        G_f.write_graphviz(sys.stdout)
    order = G_f.topological_sort()
    order.reverse()
    return G_f, order
def plan_upgrade(A, force_replaced=True, replaces=None):
    """Compute the upgrade order for the package names in *A*.

    Gathers the closure of packages that must be upgraded together —
    conflicts resolvable by upgrading, installed reverse dependencies
    that would break, and reverse dependencies flagged by update
    actions — and returns the package names in upgrade order
    (dependencies first).
    """
    # FIXME: remove force_replaced
    G_f = pgraph.PGraph()  # construct G_f
    installdb = G_f.get_installdb()
    packagedb = G_f.get_packagedb()

    A = set(A)

    # Force upgrading of installed but replaced packages or else they will
    # be removed (they are obsoleted also). This is not wanted for a
    # replaced driver package (eg. nvidia-X).
    #
    # FIXME: this is also not nice. this would not be needed if replaced
    # packages are not written as obsoleted also. But if they are not
    # written obsoleted "inary index" indexes them
    if force_replaced:
        if replaces is None:
            replaces = packagedb.get_replaces()
        A |= set(inary.util.flatten_list(list(replaces.values())))

    # seed G_f with the "install closure" of package set A
    for seed in A:
        G_f.add_package(seed)

    def add_resolvable_conflicts(pkg, pending):
        """Try to resolve conflicts by upgrading

        If a package B conflicts with an old version of package A and
        does not conflict with the new version of A, add A to the
        upgrade list.
        """
        for conflict in pkg.conflicts:
            if conflict.package in G_f.vertices():
                # Conflicting package is already in the upgrade list.
                continue
            if not inary.analyzer.conflict.installed_package_conflicts(
                    conflict):
                # Conflicting package is not installed.
                # No need to deal with it.
                continue
            if not packagedb.has_package(conflict.package):
                # Conflicting package is not available in repo.
                # Installed package will be removed.
                continue
            new_pkg = packagedb.get_package(conflict.package)
            if conflict.satisfies_relation(new_pkg.version, new_pkg.release):
                # Package still conflicts with the repo package.
                # Installed package will be removed.
                continue
            # Upgrading the package will resolve conflict.
            # Add it to the upgrade list.
            pending.add(conflict.package)
            G_f.add_package(conflict.package)

    def add_broken_revdeps(pkg, pending):
        # Reverse dependencies that the repo no longer satisfies must be
        # upgraded alongside *pkg*.
        for rev_dep, depinfo in installdb.get_rev_deps(pkg.name):
            # add only installed but unsatisfied reverse dependencies
            if rev_dep in G_f.vertices() or depinfo.satisfied_by_repo():
                continue
            if is_upgradable(rev_dep, installdb, packagedb):
                pending.add(rev_dep)
                G_f.add_plain_dep(rev_dep, pkg.name)

    def add_needed_revdeps(pkg, pending):
        # Honor explicit "reverseDependencyUpdate" update actions of the
        # package being upgraded; check only the installed ones.
        release = installdb.get_release(pkg.name)
        packages = pkg.get_update_actions(release).get(
            "reverseDependencyUpdate")
        if packages:
            for target_package in packages:
                for rev_name in installdb.get_rev_dep_names(target_package):
                    if rev_name in G_f.vertices() or not is_upgradable(
                            rev_name, installdb, packagedb):
                        continue
                    pending.add(rev_name)
                    G_f.add_plain_dep(rev_name, target_package)

    while A:
        pending = set()
        for name in A:
            G_f.add_package(name)
            pkg = packagedb.get_package(name)
            add_resolvable_conflicts(pkg, pending)
            if installdb.has_package(name):
                add_broken_revdeps(pkg, pending)
                add_needed_revdeps(pkg, pending)
        A = pending

    order = G_f.topological_sort()
    order.reverse()
    return order
def install_pkg_files(package_URIs, reinstall=False):
    """Install a number of inary package files.

    Reads each package file, checks distribution/architecture
    compatibility, pulls unsatisfied runtime dependencies from the
    repository, then installs the files in dependency order.

    :param package_URIs: iterable of paths/URIs to package files.
    :param reinstall: when True, install even if already installed.
    :return: True on completion or dry run; None when nothing to do.
    """
    installdb = inary.db.installdb.InstallDB()
    ctx.ui.debug('A = {}'.format(str(package_URIs)))

    # This entry point only handles files, never bare package names.
    for x in package_URIs:
        if not x.endswith(ctx.const.package_suffix):
            raise Exception(
                _('Mixing file names and package names not supported yet.'))

    # filter packages that are already installed
    tobe_installed, already_installed = [], set()
    if not reinstall:
        for x in package_URIs:
            pkg_name = util.parse_package_name_get_name(os.path.basename(x))
            if installdb.has_package(pkg_name):
                already_installed.add(pkg_name)
            else:
                tobe_installed.append(x)
        if already_installed:
            ctx.ui.warning(
                _("The following package(s) are already installed "
                  "and are not going to be installed again:"))
            ctx.ui.info(util.format_by_columns(sorted(already_installed)))
        package_URIs = tobe_installed

    if ctx.config.get_option('ignore_dependency'):
        # simple code path then: install each file as-is, no planning
        for x in package_URIs:
            atomicoperations.install_single_file(x, reinstall)
        return True

    # read the package information into memory first
    # regardless of which distribution they come from
    d_t = {}   # package name -> in-file package metadata
    dfn = {}   # package name -> originating file path/URI
    for x in package_URIs:
        try:
            package = inary.package.Package(x)
            package.read()
        except zipfile.BadZipfile:
            # YALI needed to get which file is broken
            raise zipfile.BadZipfile(x)
        name = str(package.metadata.package.name)
        d_t[name] = package.metadata.package
        dfn[name] = x

    # check packages' DistributionReleases and Architecture
    if not ctx.get_option('ignore_check'):
        for x in list(d_t.keys()):
            pkg = d_t[x]
            if pkg.distributionRelease > ctx.config.values.general.distribution_release:
                raise Exception(
                    _('Package \"{0}\" is not compatible with your distribution release \'{1}\' \'{2}\'.'
                      ).format(x, ctx.config.values.general.distribution,
                               ctx.config.values.general.distribution_release))
            if pkg.architecture != ctx.config.values.general.architecture:
                raise Exception(
                    _('Package \"{0}\" (\'{1}\') is not compatible with your \'{2}\' architecture.'
                      ).format(x, pkg.architecture,
                               ctx.config.values.general.architecture))

    def satisfiesDep(dep):
        # is dependency satisfied among available packages
        # or packages to be installed?
        return dep.satisfied_by_installed() or dep.satisfied_by_dict_repo(d_t)

    # for this case, we have to determine the dependencies
    # that aren't already satisfied and try to install them
    # from the repository
    dep_unsatis = []
    for name in list(d_t.keys()):
        pkg = d_t[name]
        deps = pkg.runtimeDependencies()
        for dep in deps:
            if not satisfiesDep(dep) and dep.package not in [
                    x.package for x in dep_unsatis]:
                dep_unsatis.append(dep)

    # now determine if these unsatisfied dependencies could
    # be satisfied by installing packages from the repo
    for dep in dep_unsatis:
        if not dep.satisfied_by_repo() and not ctx.config.get_option(
                'ignore_satisfy'):
            # NOTE(review): `name` here is the leftover loop variable from
            # the collection loop above — it is the last package examined,
            # not necessarily the owner of this dependency, so the message
            # may blame the wrong package. Confirm and fix separately.
            raise Exception(
                _('External dependencies not satisfied: \"{}\", \"{}\"').
                format(dep, name))

    # if so, then invoke install_pkg_names
    extra_packages = [x.package for x in dep_unsatis]
    if extra_packages:
        ctx.ui.warning(
            _("The following packages will be installed "
              "in order to satisfy dependencies:"))
        ctx.ui.info(util.format_by_columns(sorted(extra_packages)))
        if not ctx.ui.confirm(_('Would you like to continue?')):
            raise Exception(_('External dependencies not satisfied.'))
        install_pkg_names(extra_packages, reinstall=False, extra=False)

    # Minimal in-memory package DB so the planner below can resolve the
    # file-provided packages by name.
    class PackageDB:
        @staticmethod
        def get_package(key, repo=None):
            return d_t[str(key)]

    packagedb = PackageDB()
    installdb = inary.db.installdb.InstallDB()

    A = list(d_t.keys())
    if len(A) == 0:
        ctx.ui.info(_('No packages to install.'))
        return

    # try to construct a inary graph of packages to
    # install / reinstall
    G_f = pgraph.PGraph(packagedb, installdb)  # construct G_f
    G_f.reinstall = reinstall

    # find the "install closure" graph of G_f by package
    # set A using packagedb
    for x in A:
        G_f.packages.append(x)
    B = A
    # NOTE(review): Bp is never populated, so this loop runs exactly once;
    # the dependency expansion it once performed is commented out below.
    while len(B) > 0:
        Bp = set()
        for x in B:
            pkg = packagedb.get_package(x)
            G_f.add_package(x)
            # for dep in pkg.runtimeDependencies():
            #     G_f.add_package(dep)
        B = Bp
    order = G_f.topological_sort()

    if not ctx.get_option('ignore_package_conflicts'):
        conflicts = operations.helper.check_conflicts(order, packagedb)
        if conflicts:
            operations.remove.remove_conflicting_packages(conflicts)
    order.reverse()
    ctx.ui.info(_('Installation order: ') + util.strlist(order))

    if ctx.get_option('dry_run'):
        return True

    ctx.ui.notify(ui.packagestogo, order=order)

    for x in order:
        atomicoperations.install_single_file(dfn[x], reinstall)
    return True
def plan_emerge(A):
    """Plan the build/install steps for the source packages named in *A*.

    :param A: iterable of source package names to emerge.
    :return: (need_build, order_build) — sources to build to satisfy
        build dependencies, and the build order for *A* itself.
    """
    sourcedb = inary.db.sourcedb.SourceDB()
    installdb = inary.db.installdb.InstallDB()

    # try to construct a inary graph of packages to
    # install / reinstall
    G_f = pgraph.PGraph(sourcedb, installdb)

    def get_spec(name):
        # Fetch the spec for a source package, failing loudly if missing.
        if sourcedb.has_spec(name):
            return sourcedb.get_spec(name)
        else:
            raise Exception(
                _('Cannot find source package: \"{}\"').format(name))

    def get_src(name):
        # Convenience accessor: spec -> its source object.
        return get_spec(name).source

    def add_src(src):
        # Register a source in the graph unless it is already a vertex.
        if not str(src.name) in G_f.vertices():
            # TODO replace this shitty way with a function
            G_f.packages.append(src.name)

    def pkgtosrc(pkg):
        # Map a binary package name to its source package name.
        return sourcedb.pkgtosrc(pkg)

    # setup first
    # specfiles = [ sourcedb.get_source(x)[1] for x in A ]
    # pkgtosrc = {}
    B = A

    install_list = set()   # binary packages to install from the repo
    need_build = []        # source packages that must be built first
    skip_list = set()      # build deps already handled

    while len(B) > 0:
        Bp = set()
        for x in B:
            sf = get_spec(x)
            src = sf.source
            add_src(src)

            # add dependencies

            def find_build_dep(A):
                # need_build is build list
                # order_build is build list from input
                # A is current process list
                # skip_list is finished list
                # NOTE(review): `i in A` below is always true for every
                # element of A, so this function returns on its first
                # iteration and the recursive build-dep walk never runs —
                # confirm intent before relying on need_build contents.
                for i in A:
                    if i in need_build or \
                            i in order_build or \
                            i in A or \
                            i in skip_list:
                        return
                    else:
                        pkg = pkgtosrc(i)
                        need_build.insert(0, pkg)
                        src = get_spec(pkg).source
                        for dep in src.buildDependencies:
                            if not installdb.has_package(dep.package):
                                if dep.package not in install_list:
                                    find_build_dep([dep.package])
                            else:
                                skip_list.add(dep.package)

            def process_dep(dep):
                # Queue an unsatisfied dependency for installation.
                if not dep.satisfied_by_installed():
                    # TODO: add half-emerge support
                    if sourcedb.get_pkg_src()[
                            dep.package] or dep.satisfied_by_repo():
                        install_list.add(dep.package)
                        return
                    # NOTE(review): srcdep is assigned but never read;
                    # this tail only runs when the lookup above is falsy.
                    srcdep = pkgtosrc(dep.package)
                    G_f.packages.append(dep.package)

            for builddep in src.buildDependencies:
                process_dep(builddep)

            for pkg in sf.packages:
                for rtdep in pkg.packageDependencies:
                    process_dep(rtdep)
        # NOTE(review): Bp is never populated, so the outer while loop
        # always terminates after a single pass.
        B = Bp

    order_build = G_f.topological_sort()
    order_build.reverse()

    find_build_dep(install_list)

    # TODO: add half-emerge support
    # order_inst = inary.operations.install.plan_install_pkg_names(install_list)

    return need_build, order_build