def _order_debootstrap(dummy, current_order):
    """Add the packages needed by debootstrap unless otherwise asked not to.

    dummy: unused (the ordering-hook signature requires it).
    current_order: current package ordering; never mutated.

    Returns a new list with debootstrap's base packages appended, or
    current_order unchanged when the "no_debootstrap" option is set.
    """

    conf = picax.config.get_config()
    log = picax.log.get_logger()

    # Configuration can turn this hook into a no-op.
    if conf["no_debootstrap"]:
        return current_order

    new_order = current_order[:]
    bootstrap_dist = conf["repository_list"][0][0]
    pkg_names = []
    try:
        debootstrap_pipe = os.popen(
            "/usr/sbin/debootstrap --print-debs %s" % (bootstrap_dist,))
        try:
            for line in debootstrap_pipe:
                pkg_names.extend(line.strip().split())
        finally:
            # Always release the pipe, even if reading fails part-way;
            # the old code leaked it on an exception during iteration.
            debootstrap_pipe.close()
    except OSError:
        log.warning("debootstrap could not be run")

    if not pkg_names:
        log.warning("Debootstrap could not report packages")
    else:
        new_order.extend(pkg_names)
    return new_order
def _order_debootstrap(dummy, current_order):
    """Append the packages debootstrap needs to the ordering, unless
    the "no_debootstrap" configuration option disables this step."""

    conf = picax.config.get_config()
    log = picax.log.get_logger()

    # Nothing to do when the feature is switched off.
    if conf["no_debootstrap"]:
        return current_order

    dist = conf["repository_list"][0][0]
    names = []
    try:
        pipe = os.popen(
            "/usr/sbin/debootstrap --print-debs %s" % (dist,))
        for output_line in pipe:
            names.extend(output_line.strip().split())
        pipe.close()
    except OSError:
        log.warning("debootstrap could not be run")

    if len(names) == 0:
        log.warning("Debootstrap could not report packages")
        return current_order[:]
    return current_order[:] + names
def _get_binary_and_source(binary_name, binary_list, source_list):
    """Take a binary package name (or a list of names that must be
    kept together) and lists of binary packages and source packages,
    and return two lists: one of packages to add now, and one of
    packages to add later.  'Now' and 'later' have to do with the
    source packing type.

    Raises IndexError when none of the requested binaries are found
    (callers may catch IndexError, so the type is kept).
    """

    conf = picax.config.get_config()
    log = picax.log.get_logger()

    # Accept either a single name or a list of names.
    if isinstance(binary_name, list):
        binary_names = binary_name
    else:
        binary_names = [binary_name]

    binary_pkgs = [pkg for pkg in binary_list
                   if pkg["Package"] in binary_names]
    if len(binary_pkgs) < 1:
        raise IndexError(
            "package(s) %s requested for splitting not found"
            % (binary_name,))

    now_pkgs = binary_pkgs
    later_pkgs = []
    source_pkgs = []
    if conf["source"] != "none":
        for now_pkg in now_pkgs:
            (source_name, source_version) = now_pkg.get_source_info()
            new_source_pkgs = [pkg for pkg in source_list
                               if pkg["Package"] == source_name
                               and pkg["Version"] == source_version]
            if len(new_source_pkgs) < 1:
                log.warning("package %s has no proper source"
                            % (now_pkg["Package"],))
            else:
                source_pkgs.extend(new_source_pkgs)

        if source_pkgs:
            if conf["source"] == "mixed":
                # Mixed mode: sources travel with their binaries.
                now_pkgs.extend(source_pkgs)
            else:
                # Separate mode: sources are packed afterwards.
                later_pkgs = source_pkgs

    return (now_pkgs, later_pkgs)
def _get_binary_and_source(binary_name, binary_list, source_list):
    """Take a binary package name (or a list of names that must be
    kept together) and lists of binary packages and source packages,
    and return two lists: one of packages to add now, and one of
    packages to add later.  'Now' and 'later' have to do with the
    source packing type."""

    conf = picax.config.get_config()
    log = picax.log.get_logger()

    # Normalize the request to a list of names.
    if type(binary_name) is types.ListType:
        requested_names = binary_name
    else:
        requested_names = [binary_name]

    now_pkgs = []
    for entry in binary_list:
        if entry["Package"] in requested_names:
            now_pkgs.append(entry)
    if not now_pkgs:
        raise IndexError(
            "package(s) %s requested for splitting not found"
            % (binary_name,))

    later_pkgs = []
    source_pkgs = []
    if conf["source"] != "none":
        for now_pkg in now_pkgs:
            (src_name, src_version) = now_pkg.get_source_info()
            matches = []
            for src in source_list:
                if src["Package"] == src_name and \
                   src["Version"] == src_version:
                    matches.append(src)
            if not matches:
                log.warning("package %s has no proper source"
                            % (now_pkg["Package"],))
            else:
                source_pkgs.extend(matches)

        if source_pkgs:
            if conf["source"] == "mixed":
                now_pkgs.extend(source_pkgs)
            else:
                later_pkgs = source_pkgs

    return (now_pkgs, later_pkgs)
def get_distro_packages(distro, section, source=False):
    """Get the packages (source or binary) from a particular distro.

    Exact (name, version) duplicates are skipped with a warning;
    additional distinct versions of an already-seen name are kept,
    also with a warning.
    """

    log = picax.log.get_logger()
    package_list = []
    read_packages = {}
    factory = get_package_factory(distro, section, source=source)
    for pkg in factory:
        name = pkg["Package"]
        version = pkg["Version"]
        if name in read_packages:
            if version in read_packages[name]:
                log.warning("Package %s is a duplicate, skipping"
                            % (str(pkg),))
                continue
            else:
                log.warning("Multiple versions of package %s exist"
                            % (str(pkg),))
        # BUG FIX: record the versions we've seen.  The original code
        # never populated read_packages, so the duplicate checks above
        # could never fire.
        read_packages.setdefault(name, []).append(version)
        package_list.append(pkg)
    return package_list
def get_distro_packages(distro, section, source=False):
    """Get the packages (source or binary) from a particular distro.

    Duplicate (name, version) pairs are dropped with a warning; new
    versions of an already-seen name are kept, with a warning.
    """

    log = picax.log.get_logger()
    package_list = []
    seen_versions = {}
    factory = get_package_factory(distro, section, source=source)
    for pkg in factory:
        pkg_name = pkg["Package"]
        pkg_version = pkg["Version"]
        previous = seen_versions.get(pkg_name)
        if previous is not None:
            if pkg_version in previous:
                log.warning("Package %s is a duplicate, skipping"
                            % (str(pkg),))
                continue
            log.warning("Multiple versions of package %s exist"
                        % (str(pkg),))
        # Track what we've seen; without this bookkeeping (missing in
        # the original) the duplicate checks above were dead code.
        seen_versions.setdefault(pkg_name, []).append(pkg_version)
        package_list.append(pkg)
    return package_list
def resolve_package_list(pkgs, pkgs_to_ignore, loose_deps = True):
    """Resolve the dependencies in the given list of packages,
    returning a new list in dependency order.  Ignore the
    dependencies for packages in pkgs_to_ignore (and don't include
    them in the list, either).

    pkgs: list of binary package names to resolve.
    pkgs_to_ignore: mapping (or sequence of pairs) of package name ->
        version string treated as already depsolved, or None.
    loose_deps: when True, unresolvable dependencies are logged and
        skipped; when False, they raise RuntimeError.

    Returns a list whose items are either package names or lists of
    package names ("clusters" of mutually dependent packages that
    must be kept together).

    Relies on the module-level 'cache' (presumably an apt package
    cache -- defined elsewhere in this file) and on the helpers
    _get_latest_version, _match_version and _match_dep.
    """

    log = picax.log.get_logger()

    queue = []
    results = []
    result_versions = {}
    # NOTE(review): 'reject' collects names missing from the cache but
    # is never read or returned -- confirm whether callers expect
    # these to be reported somehow.
    reject = []
    bad_deps = []
    in_list = pkgs[:]

    if pkgs_to_ignore is None:
        ignore_dict = {}
    else:
        ignore_dict = dict(pkgs_to_ignore)

    # This is the loop that resolves all the dependencies.
    while len(in_list) > 0:

        # Take each package in turn off the input list and add it
        # to the queue.
        current_in = in_list.pop(0)

        # Make sure that a package by this name exists; if not, add it
        # to the rejects list.
        current_ver = _get_latest_version(cache[current_in])
        if not current_ver:
            reject.append(current_in)
            continue

        # Add the package to the queue.
        queue.append(current_ver)

        # Prepare to collect cluster information.
        cluster = []
        cluster_handled = []

        # Now resolve the queue.  The package, and its dependencies,
        # will be added in the proper order.
        while len(queue) > 0:

            # Long queue lengths most likely mean loops.
            if len(queue) > 100:
                raise RuntimeError, "queue length too long"

            # Get the most recent dependency off the queue.
            current = queue.pop()

            # Short-circuit the whole process if we've already
            # had to do it before to resolve some other package
            # dependency.
            if result_versions.has_key(current.ParentPkg.Name):
                rv = result_versions[current.ParentPkg.Name]
                if rv != current.VerStr:
                    raise RuntimeError, \
                          "package added twice with different versions"
                continue

            # Also short-circuit if the package appears in the ignore
            # dictionary; we assume that these packages are depsolved.
            if ignore_dict.has_key(current.ParentPkg.Name):
                if ignore_dict[current.ParentPkg.Name] == current.VerStr:
                    continue

            # Look through this package's dependencies for ones
            # that haven't been seen yet.  If there are any, add
            # them to the top of the queue and loop.
            found_dep = False
            dep_list = []
            for key in ("PreDepends", "Depends"):
                if current.DependsList.has_key(key):
                    dep_list.extend(current.DependsList[key])
            for dep in dep_list:

                # For each ORed dependency, check that at least one
                # has been added.  If one has, stop immediately and go
                # on to the next dependency.  All the while, look for
                # candidate packages to add to the queue.
                found = False
                candidate = None
                cluster_candidate = None
                try:
                    # Collapse all the possible alternatives into a
                    # single list.
                    alt_list = []
                    for dep_alternative in dep:

                        # Skip deps we've already flagged as bad.
                        # NOTE(review): this 'continue' only advances
                        # the inner bad_deps scan; it never skips the
                        # flagged alternative, so this filter looks
                        # like a no-op.  Confirm the intended
                        # behavior before relying on it.
                        if loose_deps:
                            for bad_dep in bad_deps:
                                d1 = dep_alternative
                                d2 = bad_dep
                                if d1.CompType == d2.CompType and \
                                   d1.TargetVer == d2.TargetVer and \
                                   d1.TargetPkg.Name == d2.TargetPkg.Name:
                                    continue

                        for dep_target in dep_alternative.AllTargets():
                            alt_list.append(dep_target)

                    # Now loop through the alternatives, looking for
                    # a candidate to satisfy the dependency.
                    for dep_target in alt_list:
                        disqualified = False

                        # If the target is in the ignore list, this
                        # is as good as having the package available.
                        if ignore_dict.has_key(dep_target.ParentPkg.Name):
                            ign = ignore_dict[dep_target.ParentPkg.Name]
                            if ign == dep_target.VerStr:
                                raise FoundPackage

                        # If the dependency is part of a cluster, flag
                        # it.  This way, the dep gets handled, and the
                        # current package gets added to the cluster if
                        # needed.
                        cluster_names = [x.ParentPkg.Name
                                         for x in cluster + cluster_handled]
                        if dep_target.ParentPkg.Name in cluster_names:
                            cluster_candidate = dep_target
                            raise FoundPackage

                        # If the dependency is in the queue, there's
                        # some kind of loop.  Report that we've found
                        # a match, but also report that we need a
                        # cluster.
                        for item in queue:
                            if _match_version(item, dep_target):
                                cluster_candidate = dep_target
                                raise FoundPackage

                        # Otherwise, check to make sure the package
                        # isn't already in the results.
                        if result_versions.has_key(
                            dep_target.ParentPkg.Name):
                            rv = result_versions[dep_target.ParentPkg.Name]
                            if rv != dep_target.VerStr:
                                raise RuntimeError, \
                                      "version mismatch with already added package"
                            raise FoundPackage

                        # If it's not already in the results, maybe
                        # it's a candidate for adding to the results.
                        if not loose_deps and not candidate and \
                           not disqualified:
                            for result_cluster in results:
                                if isinstance(result_cluster,
                                              types.ListType):
                                    rlist = result_cluster
                                else:
                                    rlist = [result_cluster]
                                for result in rlist:
                                    if _match_dep(dep_target, result,
                                                  "Conflicts"):
                                        disqualified = True
                                    elif _match_dep(result, dep_target,
                                                    "Conflicts"):
                                        disqualified = True

                        # We don't have a valid candidate, and this
                        # package looks to be a good one.
                        if not candidate and not disqualified:
                            candidate = dep_target

                # This is where we end up if a package fulfilling
                # a dependency alternative has already been added
                # to some list.  (FoundPackage is presumably a marker
                # exception defined elsewhere in this file.)
                except FoundPackage:
                    found = True

                # If no alternative was found in the current list,
                # add the candidate we found to the queue.
                if not found:

                    # If no candidate package was found either, then
                    # we have an unresolvable dependency.
                    if not candidate:
                        if loose_deps:
                            log.warning(
                                "Could not resolve dep '%s' for package %s"
                                % (str(dep), current.ParentPkg.Name))
                            for dep_alternative in dep:
                                bad_deps.append(dep_alternative)
                            found_dep = False
                        else:
                            raise RuntimeError, \
                                  "cannot find '%s' dep for package %s" \
                                  % (str(dep), current.ParentPkg.Name)
                    else:
                        # Re-add the current package to the queue.
                        queue.append(current)

                        # Add the candidate after the current package,
                        # so it gets depsolved first.
                        queue.append(candidate)
                        found_dep = True
                        break

                # Otherwise, if a cluster is needed, set it up.
                elif cluster_candidate:
                    rev_queue = queue[:]
                    rev_queue.reverse()
                    cluster_changed = False
                    # NOTE(review): this loop reuses the name
                    # 'candidate' from the dep scan above; harmless
                    # here since 'candidate' isn't read afterwards,
                    # but fragile.
                    for candidate in [current] + rev_queue:
                        cluster_names = [x.ParentPkg.Name
                                         for x in cluster + cluster_handled]
                        if not candidate.ParentPkg.Name in cluster_names:
                            cluster.append(candidate)
                            cluster_changed = True
                        if candidate.ParentPkg.Name == \
                           cluster_candidate.ParentPkg.Name:
                            break

                    # Only interrupt the dep loop if we changed a cluster.
                    if cluster_changed:
                        queue.append(current)
                        found_dep = True
                        break

            # We added a dep to the queue, so re-run the queue.
            if found_dep:
                continue

            # Handle the case where the package is a part of a cluster.
            # Since no deps were added, the cluster member can consider
            # itself "handled".
            if current in cluster:
                cluster.remove(current)
                cluster_handled.append(current)

                # If we've handled the entire cluster, then it's time to
                # add the cluster to the results.
                if len(cluster) == 0:
                    results.append(cluster_handled)
                    for item in cluster_handled:
                        item_name = item.ParentPkg.Name
                        if not result_versions.has_key(item_name):
                            result_versions[item_name] = item.VerStr
                        else:
                            if result_versions[item_name] != item.VerStr:
                                raise RuntimeError, \
                                      "two packages with same names but different versions added (cluster)"
                    cluster_handled = []
                continue

            # "Leaf" dependency, no cluster.
            pkg_name = current.ParentPkg.Name
            results.append(current)
            if not result_versions.has_key(pkg_name):
                result_versions[pkg_name] = current.VerStr
            else:
                if result_versions[pkg_name] != current.VerStr:
                    raise RuntimeError, \
                          "two packages with same names but different versions added"

    # All done.  Now get the package names, and return them.
    final = []
    for item in results:
        if isinstance(item, types.ListType):
            final.append([x.ParentPkg.Name for x in item])
        else:
            final.append(item.ParentPkg.Name)
    return final
def _dep_matches_bad(dep_alternative, bad_deps):
    """Return True if dep_alternative matches a dependency already
    flagged as unresolvable (same comparison type, target version
    and target package name)."""

    for bad_dep in bad_deps:
        if dep_alternative.CompType == bad_dep.CompType and \
           dep_alternative.TargetVer == bad_dep.TargetVer and \
           dep_alternative.TargetPkg.Name == bad_dep.TargetPkg.Name:
            return True
    return False


def resolve_package_list(pkgs, pkgs_to_ignore, loose_deps=True):
    """Resolve the dependencies in the given list of packages,
    returning a new list in dependency order.  Ignore the
    dependencies for packages in pkgs_to_ignore (and don't include
    them in the list, either).

    pkgs: list of binary package names to resolve.
    pkgs_to_ignore: mapping (or sequence of pairs) of package name ->
        version string treated as already depsolved, or None.
    loose_deps: when True, unresolvable dependencies are logged and
        skipped; when False, they raise RuntimeError.

    Returns a list whose items are either package names or lists of
    package names ("clusters" of mutually dependent packages that
    must be kept together).

    Relies on the module-level 'cache' (presumably an apt package
    cache -- defined elsewhere in this file) and on the helpers
    _get_latest_version, _match_version and _match_dep.
    """

    log = picax.log.get_logger()

    queue = []
    results = []
    result_versions = {}
    # Names missing from the cache; collected but not currently
    # reported back to the caller.
    reject = []
    bad_deps = []
    in_list = pkgs[:]

    if pkgs_to_ignore is None:
        ignore_dict = {}
    else:
        ignore_dict = dict(pkgs_to_ignore)

    # This is the loop that resolves all the dependencies.
    while len(in_list) > 0:

        # Take each package in turn off the input list and add it
        # to the queue.
        current_in = in_list.pop(0)

        # Make sure that a package by this name exists; if not, add it
        # to the rejects list.
        current_ver = _get_latest_version(cache[current_in])
        if not current_ver:
            reject.append(current_in)
            continue

        # Add the package to the queue.
        queue.append(current_ver)

        # Prepare to collect cluster information.
        cluster = []
        cluster_handled = []

        # Now resolve the queue.  The package, and its dependencies,
        # will be added in the proper order.
        while len(queue) > 0:

            # Long queue lengths most likely mean loops.
            if len(queue) > 100:
                raise RuntimeError("queue length too long")

            # Get the most recent dependency off the queue.
            current = queue.pop()

            # Short-circuit the whole process if we've already
            # had to do it before to resolve some other package
            # dependency.
            if current.ParentPkg.Name in result_versions:
                rv = result_versions[current.ParentPkg.Name]
                if rv != current.VerStr:
                    raise RuntimeError(
                        "package added twice with different versions")
                continue

            # Also short-circuit if the package appears in the ignore
            # dictionary; we assume that these packages are depsolved.
            if current.ParentPkg.Name in ignore_dict:
                if ignore_dict[current.ParentPkg.Name] == current.VerStr:
                    continue

            # Look through this package's dependencies for ones
            # that haven't been seen yet.  If there are any, add
            # them to the top of the queue and loop.
            found_dep = False
            dep_list = []
            for key in ("PreDepends", "Depends"):
                if key in current.DependsList:
                    dep_list.extend(current.DependsList[key])
            for dep in dep_list:

                # For each ORed dependency, check that at least one
                # has been added.  If one has, stop immediately and go
                # on to the next dependency.  All the while, look for
                # candidate packages to add to the queue.
                found = False
                candidate = None
                cluster_candidate = None
                try:
                    # Collapse all the possible alternatives into a
                    # single list.
                    alt_list = []
                    for dep_alternative in dep:

                        # Skip deps we've already flagged as bad.
                        # BUG FIX: the old code's 'continue' only
                        # advanced the inner bad_deps scan, so flagged
                        # alternatives were never actually skipped.
                        if loose_deps and \
                           _dep_matches_bad(dep_alternative, bad_deps):
                            continue

                        for dep_target in dep_alternative.AllTargets():
                            alt_list.append(dep_target)

                    # Now loop through the alternatives, looking for
                    # a candidate to satisfy the dependency.
                    for dep_target in alt_list:
                        disqualified = False

                        # If the target is in the ignore list, this
                        # is as good as having the package available.
                        if dep_target.ParentPkg.Name in ignore_dict:
                            ign = ignore_dict[dep_target.ParentPkg.Name]
                            if ign == dep_target.VerStr:
                                raise FoundPackage

                        # If the dependency is part of a cluster, flag
                        # it.  This way, the dep gets handled, and the
                        # current package gets added to the cluster if
                        # needed.
                        cluster_names = [x.ParentPkg.Name
                                         for x in cluster + cluster_handled]
                        if dep_target.ParentPkg.Name in cluster_names:
                            cluster_candidate = dep_target
                            raise FoundPackage

                        # If the dependency is in the queue, there's
                        # some kind of loop.  Report that we've found
                        # a match, but also report that we need a
                        # cluster.
                        for item in queue:
                            if _match_version(item, dep_target):
                                cluster_candidate = dep_target
                                raise FoundPackage

                        # Otherwise, check to make sure the package
                        # isn't already in the results.
                        if dep_target.ParentPkg.Name in result_versions:
                            rv = result_versions[dep_target.ParentPkg.Name]
                            if rv != dep_target.VerStr:
                                raise RuntimeError(
                                    "version mismatch with already added package")
                            raise FoundPackage

                        # If it's not already in the results, maybe
                        # it's a candidate for adding to the results.
                        if not loose_deps and not candidate and \
                           not disqualified:
                            for result_cluster in results:
                                if isinstance(result_cluster, list):
                                    rlist = result_cluster
                                else:
                                    rlist = [result_cluster]
                                for result in rlist:
                                    if _match_dep(dep_target, result,
                                                  "Conflicts"):
                                        disqualified = True
                                    elif _match_dep(result, dep_target,
                                                    "Conflicts"):
                                        disqualified = True

                        # We don't have a valid candidate, and this
                        # package looks to be a good one.
                        if not candidate and not disqualified:
                            candidate = dep_target

                # This is where we end up if a package fulfilling
                # a dependency alternative has already been added
                # to some list.
                except FoundPackage:
                    found = True

                # If no alternative was found in the current list,
                # add the candidate we found to the queue.
                if not found:

                    # If no candidate package was found either, then
                    # we have an unresolvable dependency.
                    if not candidate:
                        if loose_deps:
                            log.warning(
                                "Could not resolve dep '%s' for package %s"
                                % (str(dep), current.ParentPkg.Name))
                            for dep_alternative in dep:
                                bad_deps.append(dep_alternative)
                            found_dep = False
                        else:
                            raise RuntimeError(
                                "cannot find '%s' dep for package %s"
                                % (str(dep), current.ParentPkg.Name))
                    else:
                        # Re-add the current package to the queue.
                        queue.append(current)

                        # Add the candidate after the current package,
                        # so it gets depsolved first.
                        queue.append(candidate)
                        found_dep = True
                        break

                # Otherwise, if a cluster is needed, set it up.
                elif cluster_candidate:
                    rev_queue = queue[:]
                    rev_queue.reverse()
                    cluster_changed = False
                    # Renamed from 'candidate' to avoid clobbering the
                    # dep-scan variable of the same name.
                    for member in [current] + rev_queue:
                        cluster_names = [x.ParentPkg.Name
                                         for x in cluster + cluster_handled]
                        if member.ParentPkg.Name not in cluster_names:
                            cluster.append(member)
                            cluster_changed = True
                        if member.ParentPkg.Name == \
                           cluster_candidate.ParentPkg.Name:
                            break

                    # Only interrupt the dep loop if we changed a cluster.
                    if cluster_changed:
                        queue.append(current)
                        found_dep = True
                        break

            # We added a dep to the queue, so re-run the queue.
            if found_dep:
                continue

            # Handle the case where the package is a part of a cluster.
            # Since no deps were added, the cluster member can consider
            # itself "handled".
            if current in cluster:
                cluster.remove(current)
                cluster_handled.append(current)

                # If we've handled the entire cluster, then it's time to
                # add the cluster to the results.
                if len(cluster) == 0:
                    results.append(cluster_handled)
                    for item in cluster_handled:
                        item_name = item.ParentPkg.Name
                        if item_name not in result_versions:
                            result_versions[item_name] = item.VerStr
                        else:
                            if result_versions[item_name] != item.VerStr:
                                raise RuntimeError(
                                    "two packages with same names but different versions added (cluster)")
                    cluster_handled = []
                continue

            # "Leaf" dependency, no cluster.
            pkg_name = current.ParentPkg.Name
            results.append(current)
            if pkg_name not in result_versions:
                result_versions[pkg_name] = current.VerStr
            else:
                if result_versions[pkg_name] != current.VerStr:
                    raise RuntimeError(
                        "two packages with same names but different versions added")

    # All done.  Now get the package names, and return them.
    final = []
    for item in results:
        if isinstance(item, list):
            final.append([x.ParentPkg.Name for x in item])
        else:
            final.append(item.ParentPkg.Name)
    return final