Code Example #1
def main():

    # Load data, including full elaborated dependencies and conflict model 3 db.
    depdata.ensure_data_loaded(CONFLICT_MODELS=[3], include_edeps=True)

    con3_dists = [
        dist for dist in depdata.conflicts_3_db if depdata.conflicts_3_db[dist]
    ]

    # Solve all the conflicts! (Store data in the filenames listed.)
    # This is very slow.
    ry.resolve_all_via_backtracking(con3_dists,
                                    depdata.elaborated_dependencies,
                                    depdata.versions_by_package,
                                    'data/backtracker_solutions.json',
                                    'data/backtracker_errors.json',
                                    'data/backtracker_unresolvables.json')
Code Example #2
def main():

  # Load data, including full elaborated dependencies and conflict model 3 db.
  depdata.ensure_data_loaded(CONFLICT_MODELS=[3], include_edeps=True)

  con3_dists = [dist for dist in depdata.conflicts_3_db if
      depdata.conflicts_3_db[dist]]

  # Solve all the conflicts! (Store data in the filenames listed.)
  # This is very slow.
  ry.resolve_all_via_backtracking(
      con3_dists,
      depdata.elaborated_dependencies,
      depdata.versions_by_package,
      'data/backtracker_solutions.json',
      'data/backtracker_errors.json',
      'data/backtracker_unresolvables.json')
Code Example #3
def main():

    # Load data, including conflict model 3 db.
    depdata.ensure_data_loaded(CONFLICT_MODELS=[3])

    con3_dists = [
        dist for dist in depdata.conflicts_3_db if depdata.conflicts_3_db[dist]
    ]

    # Reload the package information formatted for depsolver.
    pinfos = di.reload_already_converted_from_json(
        'data/deps_converted_for_depsolver.json')

    # Solve all the conflicts!
    # This is very, VERY slow.
    di.resolve_all_via_depsolver(con3_dists, pinfos,
                                 'data/depsolver_solutions.json',
                                 'data/depsolver_errors.json',
                                 'data/depsolver_unresolvables.json')
Code Example #4
def main():

  # Load data, including conflict model 3 db.
  depdata.ensure_data_loaded(CONFLICT_MODELS=[3])

  con3_dists = [dist for dist in depdata.conflicts_3_db if
      depdata.conflicts_3_db[dist]]

  # Reload the package information formatted for depsolver.
  pinfos = di.reload_already_converted_from_json(
      'data/deps_converted_for_depsolver.json')


  # Solve all the conflicts!
  # This is very, VERY slow.
  di.resolve_all_via_depsolver(
      con3_dists,
      pinfos,
      'data/depsolver_solutions.json',
      'data/depsolver_errors.json',
      'data/depsolver_unresolvables.json')
Code Example #5
def backtracking_satisfy_alpha(distkey_to_satisfy,
                               edeps=None,
                               edeps_alpha=None,
                               edeps_rev=None,
                               versions_by_package=None):
    """
  Small workaround.
  See https://github.com/awwad/depresolve/issues/12
  """
    if edeps is None or edeps_alpha is None or edeps_rev is None:
        depdata.ensure_data_loaded(include_edeps=True, include_sorts=True)
        edeps = depdata.elaborated_dependencies
        edeps_alpha = depdata.elaborated_alpha
        edeps_rev = depdata.elaborated_reverse
        versions_by_package = depdata.versions_by_package

    elif versions_by_package is None:
        versions_by_package = depdata.generate_dict_versions_by_package(edeps)

    satisfy_output = None
    # Try the reverse-sorted and alphabetically sorted dependency orderings
    # first; if both raise unresolvable-conflict errors, fall back to the
    # unsorted elaborated dependencies below.
    for edeps_trying in [edeps_rev, edeps_alpha]:
        try:
            satisfy_output = backtracking_satisfy(distkey_to_satisfy,
                                                  edeps_trying,
                                                  versions_by_package)

        except depresolve.UnresolvableConflictError:
            pass

        else:
            assert satisfy_output, 'Programming error. Should not be empty.'
            break

    if satisfy_output is None:
        satisfy_output = backtracking_satisfy(distkey_to_satisfy, edeps,
                                              versions_by_package)

    return satisfy_output
Code Example #6
def recheck_all_unsatisfied():
    depdata.ensure_data_loaded(CONFLICT_MODELS=[3], include_edeps=True)

    solutions = depdata.load_json_db('data/resolved_via_rbtpip.json')
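    # Each record in solutions is assumed (from the indexing below and the
    # solution_dict layout in Code Example #13) to be:
    #   [installed, satisfied, solution, errstring, stderr_installation]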

    installed = [d for d in solutions if solutions[d][0]]
    satisfied = [d for d in solutions if solutions[d][1]]

    installed_but_unsatisfied = [d for d in installed if d not in satisfied]

    # We re-run this last set, to see if they're in fact unsatisfied.
    for distkey in installed_but_unsatisfied:
        satisfied, errstring = recheck_satisfied(distkey,
                                                 solutions[distkey][2])

        if satisfied or errstring != solutions[distkey][3]:
            print('Updating satisfied-ness!: ' + distkey)
            solutions[distkey][1] = satisfied
            solutions[distkey][3] = errstring

        else:
            print('Still unsatisfied: ' + distkey + '. Error: ' + errstring)

    return solutions
Code Example #7
def main():
  # Some defaults:
  # Max number of sdists to process when pulling from a local mirror (a debug
  # aid); overridden by the --n=N argument.
  n_sdists_to_process = 0
  conflict_model = 3
  no_skip = False
  careful_skip = False
  use_local_index = False
  use_local_index_old = False
  #run_all_conflicting = False

  # Files and directories.
  assert os.path.exists(WORKING_DIRECTORY), 'Working directory does not exist.'

  # Ensure that the directory for downloaded distributions exists. There
  # should be one such sdist cache per system: it gets big, and duplicating it
  # while scraping a large number of packages would waste a lot of space.
  if not os.path.exists(TEMPDIR_FOR_DOWNLOADED_DISTROS):
    os.makedirs(TEMPDIR_FOR_DOWNLOADED_DISTROS)



  logger.info("scrape_deps_and_detect_conflicts - Version 0.5")
  distkeys_to_inspect_not_normalized = [] # raw distkeys to check, taken from arguments, not yet normalized
  distkeys_to_inspect = [] # the same distkeys after normalization

  # Argument processing.
  # If we have arguments coming in, treat those as the packages to inspect.
  if len(sys.argv) > 1:
    for arg in sys.argv[1:]:
      if arg.startswith("--n="):
        n_sdists_to_process = int(arg[4:])
      elif arg == "--cm1":
        conflict_model = 1
      elif arg == "--cm2":
        conflict_model = 2
      elif arg == "--cm3":
        conflict_model = 3
      elif arg == "--noskip":
        no_skip = True
      elif arg == '--carefulskip':
        careful_skip = True
      elif arg == "--local-old":
        # without ='<directory>' means we pull alphabetically from local PyPI
        # mirror at /srv/pypi/
        # Parse .tar.gz files as they appear in bandersnatch version <= 1.8
        # For newer versions of bandersnatch, the sdist files are stored
        # differently (not in project-based directories) and so the argument
        # --local should be used instead.
        use_local_index_old = True
      elif arg == "--local":
        # without ='<directory>' means we pull from local PyPI mirror at
        # /srv/pypi/
        # Parse .tar.gz files as they appear in bandersnatch version 1.11
        # For bandersnatch 1.11, the sdist files are stored differently than in
        # <1.8. They are no longer kept in project-based directories).
        # If you are using a version of bandersnatch <=1.8, the argument
        # --local-old should be used instead.
        use_local_index = True
      #elif arg == '--conflicting':
      #  # Operate locally and run on the distkeys provided in the indicated
      #  # file, each on its own line.
      #  use_local_index = True
      #  run_all_conflicting = True
      else:
        distkeys_to_inspect_not_normalized.append(arg) # e.g. 'motorengine(0.7.4)'
        # For simplicity right now, this is meant to be used with either
        # explicit distkeys or a local-mirror mode, not both at once.


  # Normalize any input distkeys we were given.
  for distkey in distkeys_to_inspect_not_normalized:
    assert '(' in distkey and distkey.endswith(')'), 'Invalid input.'
    distkey = depdata.normalize_distkey(distkey)
    distkeys_to_inspect.append(distkey)


  # Were we not given any distkeys to inspect?
  if not distkeys_to_inspect:# and not run_all_conflicting:

    if not use_local_index and not use_local_index_old:
      # If we're not using a local index, we have nothing to do.
      raise ValueError('You neither specified distributions to scrape nor '
          '(alternatively) indicated that they should be chosen from a local '
          'mirror.')

    elif use_local_index_old:
      # If we were told to work with a local mirror, but weren't given specific
      # sdists to inspect, we'll scan everything in
      # BANDERSNATCH_MIRROR_SDIST_DIR until we have n_sdists_to_process sdists.
      # There is a better way to do this, but I'll leave this as is for now.

      # Ensure that the local PyPI mirror directory exists first.
      if not os.path.exists(BANDERSNATCH_MIRROR_SDIST_DIR):
        raise Exception('--- Exception. Expecting a bandersnatched mirror of '
            'PyPI at ' + BANDERSNATCH_MIRROR_SDIST_DIR + ' but that directory '
            'does not exist.')
      i = 0
      for dir, subdirs, files in os.walk(BANDERSNATCH_MIRROR_SDIST_DIR):
        for fname in files:
          if is_sdist(fname):
            tarfilename_full = os.path.join(dir, fname)
            # Deduce package names and versions from sdist filename.
            distkey = get_distkey_from_full_filename(tarfilename_full)
            distkeys_to_inspect.append(distkey)
            i += 1
            # Awkward control structure, but it saves debug run time; tidy later.
            if i >= n_sdists_to_process:
              break
        if i >= n_sdists_to_process:
          break

    else: # use_local_index (modern bandersnatch version)
      assert use_local_index, 'Programming error.'
      # # sdists live here: /srv/pypi/web/packages/??/??/*/*.tar.gz
      # # Can implement this such that it checks those places.
      # for name1 in os.listdir(BANDERSNATCH_NEW_MIRROR_SDIST_DIR):
      #   if len(name1) != 2:
      #     continue
      #   for name2 in os.listdir(os.path.join(
      #       BANDERSNATCH_NEW_MIRROR_SDIST_DIR, name1)):
      #     if len(name2) != 2:
      #       continue
      #     for name3 in os.listdir(os.path.join(
      #         BANDERSNATCH_NEW_MIRROR_SDIST_DIR, name1, name2)):
      #       if len(name3) != 60:
      #         continue
      #       for fname in os.listdir():
      #  #.... No, this is not going to unambiguously get me the package name
      #  # in the way that it used to in older versions of bandersnatch.
      #  # Rather than dealing with unexpected naming consequences, I'll go
      #  # with the following even more annoying hack....

      # A dictionary of all versions of all packages on the mirror,
      # collected out-of-band (via xml-rpc at same time as mirroring occurred).
      vbp_mirror = json.load(open('data/versions_by_package.json', 'r'))
      i = 0
      for package in vbp_mirror:
        if i >= n_sdists_to_process:
          break

        for version in vbp_mirror[package]:

          if i >= n_sdists_to_process:
            break

          distkey = depdata.distkey_format(package, version)
          distkeys_to_inspect.append(distkey)

          i += 1



  # We should now have distkeys to inspect (unless run_all_conflicting is True).


  # Load the dependencies, conflicts, and blacklist databases.
  # The blacklist is a list of runs that resulted in errors or runs that were
  # manually added because, for example, they hang seemingly forever or take an
  # inordinate length of time.
  depdata.ensure_data_loaded([conflict_model])

  # Alias depdata.conflicts_db to the relevant conflicts db. (Ugly)
  depdata.set_conflict_model_legacy(conflict_model) # should remove this


  #if run_all_conflicting:
  #  distkeys_to_inspect = [distkey for distkey in depdata.conflicts_3_db if
  #      depdata.conflicts_3_db[distkey]]


  n_inspected = 0
  n_successfully_processed = 0
  last_wrote_at = 0

  # Now take all of the distkeys ( e.g. 'python-twitter(0.2.1)' ) indicated and
  # run on them.
  for distkey in distkeys_to_inspect:
    
    # To avoid losing too much data, make sure we write data to disk at least
    # about every 100 successfully processed or 10,000 inspected dists. Avoid
    # writing repeatedly in edge cases (e.g. when we write after 100
    # successfully processed dists and would otherwise keep writing for every
    # skip that occurs after that).
    progress = n_inspected + n_successfully_processed * 100
    if progress > last_wrote_at + 10000:
      last_wrote_at = progress
      logger.info("Writing early.")
      depdata.write_data_to_files([conflict_model])


    # The skip conditions.

    # If dist is in the blacklist for the same version of python we're running.
    blacklisted = distkey in depdata.blacklist \
        and sys.version_info.major in depdata.blacklist[distkey]

    # If dist has conflict info saved already
    already_in_conflicts = distkey in depdata.conflicts_db

    # Do we have dep info for the dist? Not a skip condition, but part of
    # careful_skip tests.
    already_in_dependencies = distkey in depdata.dependencies_by_dist


    # If we're not in no_skip mode, perform the skip checks: if the dist is
    # blacklisted or we already have conflict data for it, skip it - unless
    # we're in careful_skip mode and we don't have dependency data for the
    # dist.
    if not no_skip and (blacklisted or already_in_conflicts):

      # If dist isn't blacklisted, we already have conflict info, there's no
      # dependency info, and careful skip is on, don't actually skip.
      if careful_skip and not already_in_dependencies and not blacklisted:
        print('---    Not skipping ' + distkey + ': ' +
            'Already have conflict data, however there is no dependency info '
            'for the dist, the dist is not blacklisted, and we are in '
            'careful_skip mode.')

      else: # Skip, since we don't have a reason not to.
        n_inspected += 1
        print('---    SKIP -- ' + distkey + ': ' +
            'Blacklisted. '*blacklisted +
            'Already have conflict data. '*already_in_conflicts +
            '(Finished ' + str(n_inspected) + ' out of ' +
            str(len(distkeys_to_inspect)) + ')')
        continue


    # If we didn't skip, process the dist.

    packagename = depdata.get_packname(distkey)
    version_string = depdata.get_version(distkey)
    #assert(distkey.rfind(')') == len(distkey) - 1)
    formatted_requirement = packagename + "==" + version_string
    exitcode = None
    assert(conflict_model in [1, 2, 3])

    # Construct the argument list.
    # Include argument to pass to pip to tell it not to prod users about our
    # strange pip version (lest they follow that instruction and install a
    # standard pip version):
    pip_arglist = [
      'install',
      '-d', TEMPDIR_FOR_DOWNLOADED_DISTROS,
      '--disable-pip-version-check',
      '--find-dep-conflicts', str(conflict_model),
      '--quiet']
    
    if use_local_index:
      pip_arglist.extend(['-i', BANDERSNATCH_MIRROR_INDEX_DIR])

    pip_arglist.append(formatted_requirement)

    # With the arg list constructed, call pip.main with it to run a modified
    # pip install attempt (it will not actually install anything).
    # This assumes we're dealing with my pip fork, version 8.0.0.dev0seb.
    print('---    Sending ' + distkey + ' to pip.')
    logger.debug('Scraper says: before pip call, len(deps) is ' +
        str(len(depdata.dependencies_by_dist)))

    # Call pip, with a 5 minute timeout.
    exitcode = None # scoping paranoia
    try:
      exitcode = _call_pip_with_timeout(pip_arglist)
    except timeout.TimeoutException as e: # This catch is not likely. See below
      logger.warning('pip timed out on dist ' + distkey + ' (5 min)!'
          ' Will treat as error. Exception follows: ' + str(e.args))
      # Set the exit code to something other than 2 or 0 and it'll be treated
      # like any old pip error below, resulting in a blacklist.
      exitcode = 1000

    # However, unfortunately, we cannot assume that pip will let that exception
    # pass up to us. It seems to take the signal, stop and clean up, and then
    # return exit code 2. This is fine, except that then we can't really
    # blacklist the process. I'd have to add a timer here, detect something
    # very close to the timeout, and guess that it timed out. /: That sucks.
    # In any case, we'll not learn that it's a process that times out, but
    # we'll just look at it as a possible conflict case. (The data recorded
    # will not list it as a conflict. Hopefully, that data is not corrupted.
    # It's unlikely that it would have been, though, so I judge this OK.)
    
    # Process the output of the pip command.
    if exitcode == 2:
      print('--- X  SDist ' + distkey + ' : pip errored out (code=' +
        str(exitcode) + '). Possible DEPENDENCY CONFLICT. Result recorded in '
        'conflicts_<...>.json. (Finished ' +
        str(n_inspected) + ' out of ' + str(len(distkeys_to_inspect)) +
        ')')
    elif exitcode == 0:
      print('--- .  SDist ' + distkey + ' : pip completed successfully. '
        'No dependency conflicts observed. (Finished ' + str(n_inspected)
        + ' out of ' + str(len(distkeys_to_inspect)) + ')')
    else:
      print('--- .  SDist ' + distkey + ': pip errored out (code=' +
        str(exitcode) + '), but it seems to have been unrelated to any dep '
        'conflict.... (Finished ' + str(n_inspected) + ' out of ' +
        str(len(distkeys_to_inspect)) + ')')
      # Store in the list of failing packages along with the python version
      # we're running. (sys.version_info.major yields int 2 or 3)
      # Contents are to eventually be a list of the major versions in which it
      # fails. We should never get here if the dist is already in the blacklist
      # for this version of python, but let's keep going even if so.
      if distkey in depdata.blacklist and sys.version_info.major in \
        depdata.blacklist[distkey] and not no_skip:
        logger.warning('  WARNING! This should not happen! ' + distkey + ' was'
          ' already in the blacklist for python ' + str(sys.version_info.major)
          + ', thus it should not have been run unless we have --noskip on '
          '(which is not the case)!')
      else:
        # Either the dist is not in the blacklist at all, or it is not in the
        # blacklist for this version of python. (Sensible.)
        if distkey not in depdata.blacklist:
          depdata.blacklist[distkey] = [sys.version_info.major]
          logger.info("  Added entry to blacklist for " + distkey)
        else:
          assert(no_skip or sys.version_info.major not in depdata.blacklist[distkey])
          depdata.blacklist[distkey].append(sys.version_info.major)
          logger.info("  Added additional entry to blacklist for " + distkey)

          
    # end of exit code processing
    n_inspected += 1
    n_successfully_processed += 1

  # end of for each tarfile/sdist

  # We're done with all packages. Write the collected data back to file.
  logger.debug("Writing.")
  depdata.write_data_to_files([conflict_model])
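
The early-write throttle near the top of the inspection loop above weights one successfully processed dist as 100 inspected dists and flushes to disk once roughly 10,000 units of progress have accumulated since the last write. A minimal standalone sketch of that arithmetic (the helper name here is ours, not the scraper's):

def should_write_early(n_inspected, n_successfully_processed, last_wrote_at):
    # Mirrors the scraper's throttle: one successfully processed dist counts
    # as 100 inspected dists, and a write is due once progress exceeds the
    # last write point by 10,000.
    progress = n_inspected + n_successfully_processed * 100
    return progress > last_wrote_at + 10000, progress

# 9,500 inspected + 6 successfully processed -> 10,100 units, so write now.
write_now, progress = should_write_early(9500, 6, last_wrote_at=0)
print(write_now, progress)  # True 10100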
Code Example #8
def backtracking_satisfy(distkey_to_satisfy,
                         edeps=None,
                         versions_by_package=None):
    """
  Provide a list of distributions to install that will fully satisfy a given
  distribution's dependencies (and its dependencies' dependencies, and so on),
  without any conflicting or incompatible versions.

  This is a backtracking dependency resolution algorithm.
  
  This recursion is extremely inefficient, and would profit from dynamic
  programming in general.

  Note that there must be a level of indirection for the timeout decorator to
  work as it is currently written. (This function can't call itself directly
  recursively, but must instead call _backtracking_satisfy, which then can
  recurse.)


  Arguments:
    - distkey_to_satisfy ('django(1.8.3)'),
    - edeps (dictionary returned by depdata.deps_elaborated; see there.)
    - versions_by_package (dictionary of all distkeys, keyed by package name)
      (If not included, it will be generated from edeps.)

  Returns:
    - list of distkeys needed as direct or indirect dependencies to install
      distkey_to_satisfy, including distkey_to_satisfy

  Throws:
    - timeout.TimeoutException if the process takes longer than 5 minutes
    - depresolve.UnresolvableConflictError if not able to generate a solution
      that satisfies all dependencies of the given package (and their
      dependencies, etc.). This suggests that there is an unresolvable
      conflict.
    - depresolve.ConflictingVersionError
      (Should not raise, ideally, but might - requires more testing)
    - depresolve.NoSatisfyingVersionError
      (Should not raise, ideally, but might - requires more testing)

  """
    if edeps is None:
        depdata.ensure_data_loaded(include_edeps=True)
        edeps = depdata.elaborated_dependencies
        versions_by_package = depdata.versions_by_package

    elif versions_by_package is None:
        versions_by_package = depdata.generate_dict_versions_by_package(edeps)

    try:
        (satisfying_candidate_set, new_conflicts, child_dotgraph) = \
            _backtracking_satisfy(distkey_to_satisfy, edeps, versions_by_package)

    except depresolve.ConflictingVersionError as e:
        # Compromise traceback style so as not to give up python2 compatibility.
        six.reraise(
            depresolve.UnresolvableConflictError,
            depresolve.UnresolvableConflictError(
                'Unable to find solution'
                ' to a conflict with one of ' + distkey_to_satisfy +
                "'s immediate "
                'dependencies.'),
            sys.exc_info()[2])

        # Python 3 style (by far the nicest):
        #raise depresolve.UnresolvableConflictError('Unable to find solution to '
        #    'a conflict with one of ' + distkey_to_satisfy + "'s immediate "
        #    'dependencies.') from e

        # Original (2 or 3 compatible but not great on either, especially not 2)
        #raise depresolve.UnresolvableConflictError('Unable to find solution to '
        #     'conflict with one of ' + distkey_to_satisfy + "'s immediate "
        #     'dependencies.' Lower level conflict exception follows: ' + str(e))

    else:
        return satisfying_candidate_set
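
A minimal usage sketch for backtracking_satisfy as documented above. The distkey is the docstring's own illustration, depresolve.UnresolvableConflictError is the exception the function itself raises, and leaving edeps as None lets the function load the elaborated dependency data itself. This is a sketch of intended use, not code taken from the source.

import depresolve

try:
    # With edeps=None, the function calls depdata.ensure_data_loaded itself.
    install_set = backtracking_satisfy('django(1.8.3)')
    print('Install these distributions: ' + str(install_set))

except depresolve.UnresolvableConflictError as e:
    print('Unresolvable conflict for django(1.8.3): ' + str(e))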
Code Example #9
def are_fully_satisfied(candidates,
                        edeps=None,
                        versions_by_package=None,
                        disregard_setuptools=False,
                        report_issue=False):
    """
  Validates the results of a resolver solution.
  Given a set of distkeys, determines whether or not all dependencies of all
  given dists are satisfied by the set (and all dependencies of their
  dependencies, etc.).
  Returns True if that is so, else returns False.

  Note that this depends on the provided dependency information in edeps.
  If those dependencies were harvested on a system that differs from the one
  that generated the given candidates (e.g. if even the python versions used
  are different), there is a chance the dependencies won't actually match,
  since, as we know, PyPI dependencies are not static.

  Arguments:
    1. candidates: a list of distkeys indicating which dists have been selected
           to satisfy each others' dependencies.
    2. edeps: elaborated dependencies (see depresolve/depdata.py) If not
           provided, this will be loaded from the data directory using
           depdata.ensure_data_loaded.
    3. versions_by_package (as generated by
           depresolve.depdata.generate_dict_versions_by_package(); a dict of
           all versions for each package name). If not included, this will be
           generated from the given (or loaded) edeps.
    4. disregard_setuptools: optional. I dislike this hack. Because for the
           rbtcollins resolver, I'm testing solutions generated by pip installs
           and harvested by pip freeze, I'm not going to get setuptools listed
           in the solution set (pip freeze doesn't list it), so ... for that, I
           pass in disregard_setuptools=True.
    5. report_issue: optional. If True, additionally returns a string
           describing the (first) unsatisfied dependency.

  Returns:
    - True or False
    - (ONLY if report_issue is True), A string description of unsatisfied
      dependencies.

  Throws:
    - depresolve.MissingDependencyInfoError:
        if the dependencies data lacks info for one of the candidate dists.
        e.g. if solution employs a version not in the outdated dependency data

  """
    # Lowercase the distkeys for our all-lowercase data, just in case.
    candidates = [distkey.lower() for distkey in candidates]

    # Load the dependency library if one wasn't provided.
    if edeps is None:
        depdata.ensure_data_loaded(include_edeps=True)
        edeps = depdata.elaborated_dependencies
        versions_by_package = depdata.versions_by_package

    elif versions_by_package is None:
        versions_by_package = depdata.generate_dict_versions_by_package(edeps)

    satisfied = True
    problem = ''

    for distkey in candidates:

        depdata.assume_dep_data_exists_for(distkey, edeps)

        for edep in edeps[distkey]:
            this_dep_satisfied = is_dep_satisfied(
                edep, candidates, disregard_setuptools=disregard_setuptools
            )  # do not use report_issue

            if not this_dep_satisfied:
                satisfied = False
                this_problem = distkey + ' dependency ' + edep[0] + str(edep[2]) + \
                    ' is not satisfied by candidate set: ' + str(candidates) + \
                    '. Acceptable versions were: ' + str(edep[1])
                logger.info(this_problem)
                if problem:
                    problem += '. '
                problem += this_problem

    if report_issue:
        return satisfied, problem
    else:
        return satisfied
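
A brief usage sketch for are_fully_satisfied with report_issue=True, which returns both the boolean verdict and a description of any unsatisfied dependencies. The candidate distkeys below are purely illustrative, not taken from the data.

# Hypothetical candidate set; in practice this comes from a resolver's output.
candidates = ['motorengine(0.7.4)', 'motor(0.1.2)', 'six(1.9.0)']

satisfied, problem = are_fully_satisfied(candidates, report_issue=True)

if not satisfied:
    print('Candidate set does not satisfy all dependencies: ' + problem)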
Code Example #10
          ['motor', ''],
          ['six', ''],
          ['easydict', '']],


  The old format is consistent with a piece of pip's internal specifier
  representations, and the new format is consistent with pip's SpecifierSets,
  which are much more useful. The switch resolves some bugs and makes things
  easier to code and read.

"""

import depresolve.depdata as depdata
from deptools import spectuples_to_specstring

depdata.ensure_data_loaded()

new_deps = dict()

for distkey in depdata.dependencies_by_dist:

    my_deps = depdata.dependencies_by_dist[distkey]
    new_deps[distkey] = []

    for dep in my_deps:  # for every one of its dependencies,
        satisfying_packagename = dep[0]
        spectuples = dep[1]
        specstring = ''

        # Catch case where there are new style dependencies among the old....
        if type(spectuples) in [list, tuple]:
Code Example #11
          ['six', ''],
          ['easydict', '']],


  The old format is consistent with a piece of pip's internal specifier
  representations, and the new format is consistent with pip's SpecifierSets,
  which are much more useful. The switch resolves some bugs and makes things
  easier to code and read.

"""

import depresolve.depdata as depdata
from deptools import spectuples_to_specstring


depdata.ensure_data_loaded()

new_deps = dict()


for distkey in depdata.dependencies_by_dist:
  
  my_deps = depdata.dependencies_by_dist[distkey]
  new_deps[distkey] = []

  for dep in my_deps: # for every one of its dependencies,
    satisfying_packagename = dep[0]
    spectuples = dep[1]
    specstring = ''

    # Catch case where there are new style dependencies among the old....
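
To make the format switch described in the docstring fragments above concrete, here is a hedged sketch of the conversion the truncated loop presumably performs. It assumes spectuples_to_specstring (imported in both fragments) turns a list of (operator, version) pairs into a comma-joined specifier string; the sample data and the exact output are assumptions, not taken from the source.

from deptools import spectuples_to_specstring  # same helper the fragments import

# Old-style dependency entry: package name plus specifier tuples (assumed format).
old_dep = ['django', [['>=', '1.8'], ['<', '1.9']]]

satisfying_packagename = old_dep[0]
spectuples = old_dep[1]

if type(spectuples) in [list, tuple]:
    # Old format: collapse the tuples into a single specifier string,
    # presumably something like '>=1.8,<1.9'.
    specstring = spectuples_to_specstring(spectuples)
else:
    # Already new format: the entry is the SpecifierSet-style string itself
    # (an empty string, as in ['six', ''] above, means "any version").
    specstring = spectuples

new_dep = [satisfying_packagename, specstring]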
Code Example #12
def main():
    """
  """
    successes = []

    # Load the giant dictionary of scraped dependencies for heavy testing.
    depdata.ensure_data_loaded(include_edeps=True)

    # Test resolvability.conflicts_with, which is used in the resolver.
    successes.append(test_conflicts_with())  #0

    # Test resolvability.dist_lists_are_equal, which is used in testing.
    successes.append(test_dist_lists_are_equal())  #1

    # Test resolvability.sort_versions, which is used in a variety of functions.
    successes.append(test_sort_versions())  #2

    # Test the detection of model 2 conflicts from deps.
    successes.append(test_detect_model_2_conflicts())  #3

    # Test the backtracking resolver on basic samples.
    # The current version of backtracking_satisfy is expected to fail on the
    # 2nd through 4th calls; a new backtracking algorithm should handle these
    # later.

    successes.append(
        test_resolver(ry.backtracking_satisfy, testdata.DEPS_SIMPLE_SOLUTION,
                      'x(1)', testdata.DEPS_SIMPLE))  #4

    successes.append(
        test_resolver(
            ry.backtracking_satisfy,
            testdata.DEPS_SIMPLE2_SOLUTION,
            'x(1)',
            testdata.DEPS_SIMPLE2,
            expected_exception=depresolve.UnresolvableConflictError))  #5

    successes.append(
        test_resolver(
            ry.backtracking_satisfy,
            testdata.DEPS_SIMPLE3_SOLUTION,
            'x(1)',
            testdata.DEPS_SIMPLE3,
            expected_exception=depresolve.UnresolvableConflictError))  #6

    successes.append(
        test_resolver(
            ry.backtracking_satisfy,
            testdata.DEPS_SIMPLE4_SOLUTION,
            'x(1)',
            testdata.DEPS_SIMPLE4,
            expected_exception=depresolve.UnresolvableConflictError))  #7

    # Turning this one off because the random order of dependencies in
    # elaborated_dependencies combined with the backtracker bug means that
    # this is not a reliable test: if requests appears first in the edeps dict
    # for metasort(0.3.6), it will fail to resolve because a too-recent requests
    # version is set in stone, and a conflict resolution cannot be found, since
    # the available solution uses the older requests 2.5.3.
    #
    # # Test the backtracking resolver on the case of metasort(0.3.6)
    # expected_metasort_result = [
    #     'biopython(1.66)', 'metasort(0.3.6)', 'onecodex(0.0.9)',
    #     'requests(2.5.3)', 'requests-toolbelt(0.6.0)']
    # successes.append(test_resolver(ry.backtracking_satisfy, #8
    #     expected_metasort_result, 'metasort(0.3.6)',
    #     depdata.dependencies_by_dist,
    #     versions_by_package=depdata.versions_by_package,
    #     edeps=depdata.elaborated_dependencies))

    # Test the backtracking resolver on a few model 3 conflicts (pip
    # failures). Expect these conflicts to resolve. Formerly test 8. #9-11
    for distkey in testdata.RESOLVABLE_MODEL_3_SAMPLES:
        successes.append(
            test_resolver(ry.backtracking_satisfy,
                          None,
                          distkey,
                          depdata.dependencies_by_dist,
                          versions_by_package=depdata.versions_by_package,
                          edeps=depdata.elaborated_dependencies))

    # Test the backtracking resolver on some conflicts we know to be
    # unresolvable. Formerly test 9. #12
    for distkey in testdata.UNRESOLVABLE_MODEL_3_SAMPLES:
        successes.append(
            test_resolver(
                ry.backtracking_satisfy,
                None,
                distkey,
                depdata.dependencies_by_dist,
                versions_by_package=depdata.versions_by_package,
                edeps=depdata.elaborated_dependencies,
                expected_exception=depresolve.UnresolvableConflictError))

    # # Test the backtracking resolver on some conflicts we know to be
    # # resolvable but expect it to fail at. #13-16
    # for distkey in testdata.HARDER_RESOLVABLE_MODEL_3_SAMPLES:
    #   successes.append(test_resolver(ry.backtracking_satisfy, None, distkey,
    #       depdata.dependencies_by_dist,
    #       versions_by_package=depdata.versions_by_package,
    #       edeps=depdata.elaborated_dependencies,
    #       expected_exception=depresolve.UnresolvableConflictError))


    assert False not in [success for success in successes], \
        "Some tests failed! Results are: " + str(successes)

    logger.info("All tests in main() successful. (: (:")
Code Example #13
def main():
    """
  Choose some conflicting dists to test rbtcollins solver on.

    Steps:
    1. Load dependency data.

    2. Call rbttest to solve using rbtcollins' pip branch issue-988 and
       determine the correctness of that solution.

    3. Write all the solution sets and correctness info to a json file.

  """
    # Create virtual environments directory if it doesn't exist.
    if not os.path.exists(VENVS_DIR):
        os.makedirs(VENVS_DIR)

    distkeys_to_solve = []

    n_distkeys = 3  # default 3; overridden by the --n= argument or by specific distkeys

    args = sys.argv[1:]
    noskip = False
    local = False
    all_conflicting = False

    if args:
        for arg in args:
            if arg == '--noskip':
                noskip = True
            elif arg == '--local':
                local = True
            elif arg.startswith('--local='):
                local = arg[8:]
            elif arg.startswith('--n='):
                n_distkeys = int(arg[4:])
            elif arg == '--all':
                local = True
                all_conflicting = True
            else:
                try:
                    distkeys_to_solve.append(depdata.normalize_distkey(arg))
                except Exception as e:
                    print(
                        'Unable to normalize provided argument as distkey: ' +
                        str(arg) + '. Please provide correct arguments.')
                    raise

    # If we didn't get any specific distkeys to solve for from the args, then
    # pick randomly:

    if distkeys_to_solve:
        n_distkeys = len(distkeys_to_solve)

    else:  # not distkeys_to_solve:
        # Randomize from the model 3 conflict list.

        depdata.ensure_data_loaded()

        con3 = depdata.conflicts_3_db

        conflicting = [depdata.normalize_distkey(d) for d in con3 if con3[d]]

        if all_conflicting:
            distkeys_to_solve = conflicting

        else:
            import random
            for i in range(0, n_distkeys):
                distkeys_to_solve.append(random.choice(conflicting))

    ###############
    # Step 1: Load dependency data.

    # Load dependencies from their json file, harvested in prior full run of
    # scraper.
    depdata.ensure_data_loaded(include_edeps=True)
    deps = depdata.dependencies_by_dist
    edeps = depdata.elaborated_dependencies  # potentially stale!
    versions = depdata.versions_by_package  # potentially stale!

    # Prepare solution dictionary.
    solution_dict = depdata.load_json_db(SOLUTIONS_JSON_FNAME)

    ###############
    # Step 2: Run rbttest to solve and test solution.
    try:
        for distkey in distkeys_to_solve:

            if not noskip and distkey in solution_dict:
                logger.info('Skipping rbt solve for ' + distkey +
                            ' (already have '
                            'results).')
                continue

            # Try-scoping paranoia.
            installed = None
            satisfied = None
            solution = None
            errstring = None
            stderr_installation = None

            try:
                # Unpacked explicitly into multiple named variables for the reader's clarity.
                (installed, satisfied, solution, errstring, stderr_installation) = \
                    rbttest(distkey, edeps, versions, local)
                solution_dict[distkey] = (installed, satisfied, solution,
                                          errstring, stderr_installation)

            except UnrelatedInstallFailure as e:
                # Installation failed in some trivial way and should be retried once.
                # For example, virtual environment creation failed.
                (installed, satisfied, solution, errstring, stderr_installation) = \
                    rbttest(distkey, edeps, versions, local)
                solution_dict[distkey] = (installed, satisfied, solution,
                                          errstring, stderr_installation)

            # ###############
            # # Step 3: Dump solutions and solution correctness info to file.
            # # Until this is stable, write after every solution so as not to lose data.
            # logger.info('Writing results for ' + distkey)
            # json.dump(solution_dict, open(SOLUTIONS_JSON_FNAME, 'w'))

    except:
        print(
            'Encountered ERROR. Saving solutions to file and halting. Error:')
        traceback.print_exc()

    ###############
    # Step 3: Dump solutions and solution correctness info to file.

    finally:
        print('Writing solutions gathered to ' + SOLUTIONS_JSON_FNAME)
        try:
            json.dump(solution_dict, open(SOLUTIONS_JSON_FNAME, 'w'))
        except:
            print(
                'Tried to write gathered solutions to file, but failed to '
                'write. Entering debug mode to allow data recovery.')
            import ipdb
            ipdb.set_trace()
Code Example #14
def main():

  # Try out the SAT solver in depsolver by using the wrapper function in
  # depsolver_integrate and passing that to the resolver tester in
  # resolver.test_resolvability.
  successes = []

  # Does the version string conversion work as expected?
  successes.append(test_depsolver_version_str_conversion())

  # Can we convert basic dependencies into depsolver PackageInfo?
  successes.append(test_depsolver_conversion())

  # Can we convert slightly more elaborate dependencies into depsolver
  # PackageInfo?
  successes.append(test_depsolver_conversion2())
  successes.append(test_depsolver_conversion3())


  # Basic resolvability test.
  successes.append(test_resolvability.test_resolver(
      depsolver_integrate.resolve_via_depsolver, # resolver function
      testdata.DEPS_SIMPLE_SOLUTION, #expected result
      'x(1)', # dist to install
      testdata.DEPS_SIMPLE, # dependency data
      use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  ))


  # The next three test resolvable conflicts that my backtracker can't solve.
  successes.append(test_resolvability.test_resolver(
      depsolver_integrate.resolve_via_depsolver, # resolver function
      testdata.DEPS_SIMPLE2_SOLUTION, #expected result
      'x(1)', # dist to install
      testdata.DEPS_SIMPLE2, # dependency data
      use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  ))

  successes.append(test_resolvability.test_resolver(
      depsolver_integrate.resolve_via_depsolver, # resolver function
      testdata.DEPS_SIMPLE3_SOLUTION, #expected result
      'x(1)', # dist to install
      testdata.DEPS_SIMPLE3, # dependency data
      use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  ))

  successes.append(test_resolvability.test_resolver(
      depsolver_integrate.resolve_via_depsolver, # resolver function
      testdata.DEPS_SIMPLE4_SOLUTION, #expected result
      'x(1)', # dist to install
      testdata.DEPS_SIMPLE4, # dependency data
      use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  ))

  # This tests a SLIGHTLY more complex version string, via the resolver.
  successes.append(test_resolvability.test_resolver(
      depsolver_integrate.resolve_via_depsolver, # resolver function
      DEPS_EDGE_CASES_SOLUTION, #expected result
      'pip-accel(0.9.10)', # dist to install
      DEPS_EDGE_CASES, # dependency data
      use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  ))


  # # The next two test complex versions strings and are otherwise the same as 2.
  # successes.append(test_resolvability.test_resolver(
  #     depsolver_integrate.resolve_via_depsolver, # resolver function
  #     DEPS_SIMPLE5_SOLUTION, #expected result
  #     'x(1)', # dist to install
  #     DEPS_SIMPLE5, # dependency data
  #     use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  # ))

  # successes.append(test_resolvability.test_resolver(
  #     depsolver_integrate.resolve_via_depsolver, # resolver function
  #     DEPS_SIMPLE6_SOLUTION, #expected result
  #     'x(1)', # dist to install
  #     DEPS_SIMPLE6, # dependency data
  #     use_raw_deps=True # Do not convert deps to edeps - func expects deps.
  # ))

  # This one tests an unresolvable conflict.
  successes.append(test_resolvability.test_resolver(
      depsolver_integrate.resolve_via_depsolver, # resolver function
      testdata.DEPS_UNRESOLVABLE_SOLUTION, #expected result
      'x(1)', # dist to install
      testdata.DEPS_UNRESOLVABLE, # dependency data
      use_raw_deps=True, # Do not convert deps to edeps - func expects deps.
      expected_exception=depresolve.UnresolvableConflictError
  ))



  # Now try a conversion of all deps into depsolver format.
  # We need to load the full dependencies dict
  depdata.ensure_data_loaded()

  assert(len(depdata.dependencies_by_dist))

  (deps_serious_depsolver, dists_unable_to_convert) = \
      depsolver_integrate.convert_packs_to_packageinfo_for_depsolver(
      depdata.dependencies_by_dist)

  assert len(depdata.dependencies_by_dist) == \
      len(deps_serious_depsolver) + len(dists_unable_to_convert), \
      'Programming error. Output of ' + \
      'convert_packs_to_packageinfo_for_depsolver does not make sense.'

  if not len(deps_serious_depsolver) > 100000:
    logger.info('Full conversion has failed. Number of converted packages '
        'is: ' + str(len(deps_serious_depsolver)))
    successes.append(False)
  else:
    successes.append(True)
    


  assert False not in [success for success in successes], \
      "Some tests failed! Results are: " + str(successes)


  logger.info("All tests in main() successful. (: (:")