def main():
  """
  Command-line entry point for the dependency scraper.

  Parses sys.argv for distkeys (e.g. 'motorengine(0.7.4)') and/or mode flags
  (--n=N, --cm1/--cm2/--cm3, --noskip, --carefulskip, --local, --local-old),
  gathers the set of distributions to inspect (from arguments or from a local
  bandersnatch mirror), then runs a modified pip on each one to harvest
  dependency data and detect dependency conflicts, recording results and
  blacklisting dists that error out. Writes collected data to disk
  periodically and once more at the end.
  """
  # Some defaults:
  n_sdists_to_process = 0  # debug; max packages to explore during debug - overriden by --n=N argument.
  conflict_model = 3
  no_skip = False
  careful_skip = False
  use_local_index = False
  use_local_index_old = False
  #run_all_conflicting = False

  # Files and directories.
  assert(os.path.exists(WORKING_DIRECTORY)), 'Working dir does not exist...??'

  # Ensure that appropriate directory for downloaded distros exists.
  # This would be terrible to duplicate if scraping a large number of packages.
  # One such sdist cache per system! Gets big.
  if not os.path.exists(TEMPDIR_FOR_DOWNLOADED_DISTROS):
    os.makedirs(TEMPDIR_FOR_DOWNLOADED_DISTROS)

  logger.info("scrape_deps_and_detect_conflicts - Version 0.5")

  # not-yet-normalized user input, potentially filled with distkeys to check,
  # from arguments:
  distkeys_to_inspect_not_normalized = []
  distkeys_to_inspect = []  # list after argument normalization

  # Argument processing.
  # If we have arguments coming in, treat those as the packages to inspect.
  if len(sys.argv) > 1:
    for arg in sys.argv[1:]:
      if arg.startswith("--n="):
        n_sdists_to_process = int(arg[4:])
      elif arg == "--cm1":
        conflict_model = 1
      elif arg == "--cm2":
        conflict_model = 2
      elif arg == "--cm3":
        conflict_model = 3
      elif arg == "--noskip":
        no_skip = True
      elif arg == '--carefulskip':
        careful_skip = True
      elif arg == "--local-old":
        # without ='<directory>' means we pull alphabetically from local PyPI
        # mirror at /srv/pypi/
        # Parse .tar.gz files as they appear in bandersnatch version <= 1.8
        # For newer versions of bandersnatch, the sdist files are stored
        # differently (not in project-based directories) and so the argument
        # --local should be used instead.
        use_local_index_old = True
      elif arg == "--local":
        # without ='<directory>' means we pull from local PyPI mirror at
        # /srv/pypi/
        # Parse .tar.gz files as they appear in bandersnatch version 1.11
        # For bandersnatch 1.11, the sdist files are stored differently than in
        # <1.8. They are no longer kept in project-based directories).
        # If you are using a version of bandersnatch <=1.8, the argument
        # --local-old should be used instead.
        use_local_index = True
      #elif arg == '--conflicting':
      #  # Operate locally and run on the distkeys provided in the indicated
      #  # file, each on its own line.
      #  use_local_index = True
      #  run_all_conflicting = True
      else:
        distkeys_to_inspect_not_normalized.append(arg) # e.g. 'motorengine(0.7.4)'

  # For simplicity right now, I'll use one mode or another, not both.
  # Last arg has it if both.

  # Normalize any input distkeys we were given.
  for distkey in distkeys_to_inspect_not_normalized:
    assert '(' in distkey and distkey.endswith(')'), 'Invalid input.'
    distkey = depdata.normalize_distkey(distkey)
    distkeys_to_inspect.append(distkey)

  # Were we not given any distkeys to inspect?
  if not distkeys_to_inspect:# and not run_all_conflicting:

    if not use_local_index and not use_local_index_old:
      # If we're not using a local index, we have nothing to do.
      raise ValueError('You neither specified distributions to scrape nor '
          '(alternatively) indicated that they should be chosen from a local '
          'mirror.')

    elif use_local_index_old:
      # If we were told to work with a local mirror, but weren't given specific
      # sdists to inspect, we'll scan everything in
      # BANDERSNATCH_MIRROR_SDIST_DIR until we have n_sdists_to_process sdists.
      # There is a better way to do this, but I'll leave this as is for now.

      # Ensure that the local PyPI mirror directory exists first.
      if not os.path.exists(BANDERSNATCH_MIRROR_SDIST_DIR):
        raise Exception('--- Exception. Expecting a bandersnatched mirror of '
            'PyPI at ' + BANDERSNATCH_MIRROR_SDIST_DIR + ' but that directory '
            'does not exist.')

      i = 0
      # NOTE(review): 'dir' shadows the builtin of the same name; harmless
      # here but worth renaming in a future behavior-changing pass.
      for dir, subdirs, files in os.walk(BANDERSNATCH_MIRROR_SDIST_DIR):
        for fname in files:
          if is_sdist(fname):
            tarfilename_full = os.path.join(dir, fname)
            # Deduce package names and versions from sdist filename.
            distkey = get_distkey_from_full_filename(tarfilename_full)
            distkeys_to_inspect.append(distkey)
            i += 1
            # awkward control structures, but saving debug run time. tidy later
            if i >= n_sdists_to_process:
              break
        if i >= n_sdists_to_process:
          break

    else: # use_local_index (modern bandersnatch version)
      assert use_local_index, 'Programming error.'
      # # sdists live here: /srv/pypi/web/packages/??/??/*/*.tar.gz
      # # Can implement this such that it checks those places.
      # for name1 in os.listdir(BANDERSNATCH_NEW_MIRROR_SDIST_DIR):
      #   if len(name1) != 2:
      #     continue
      #   for name2 in os.listdir(os.path.join(
      #       BANDERSNATCH_NEW_MIRROR_SDIST_DIR, name1)):
      #     if len(name2) != 2:
      #       continue
      #     for name3 in os.listdir(os.path.join(
      #         BANDERSNATCH_NEW_MIRROR_SDIST_DIR, name1, name2)):
      #       if len(name3) != 60:
      #         continue
      #       for fname in os.listdir():
      # #.... No, this is not going to unambiguously get me the package name
      # # in the way that it used to in older versions of bandersnatch.
      # # Rather than dealing with unexpected naming consequences, I'll go
      # # with the following even more annoying hack....

      # A dictionary of all versions of all packages on the mirror,
      # collected out-of-band (via xml-rpc at same time as mirroring occurred).
      # NOTE(review): this file handle is never closed explicitly; consider a
      # 'with open(...)' in a future behavior-changing pass.
      vbp_mirror = json.load(open('data/versions_by_package.json', 'r'))
      i = 0
      for package in vbp_mirror:
        if i >= n_sdists_to_process:
          break
        for version in vbp_mirror[package]:
          if i >= n_sdists_to_process:
            break
          distkey = depdata.distkey_format(package, version)
          distkeys_to_inspect.append(distkey)
          i += 1

  # We should now have distkeys to inspect (unless run_all_conflicting is True).

  # Load the dependencies, conflicts, and blacklist databases.
  # The blacklist is a list of runs that resulted in errors or runs that were
  # manually added because, for example, they hang seemingly forever or take an
  # inordinate length of time.
  depdata.ensure_data_loaded([conflict_model])

  # Alias depdata.conflicts_db to the relevant conflicts db. (Ugly)
  depdata.set_conflict_model_legacy(conflict_model) # should remove this

  #if run_all_conflicting:
  #  distkeys_to_inspect = [distkey for distkey in depdata.conflicts_3_db if
  #      depdata.conflicts_3_db[distkey]]

  n_inspected = 0
  n_successfully_processed = 0
  last_wrote_at = 0

  # Now take all of the distkeys ( e.g. 'python-twitter(0.2.1)' ) indicated and
  # run on them.
  for distkey in distkeys_to_inspect:

    # To avoid losing too much data, make sure we at least write data to disk
    # about every 100 successfully processed or 10000 inspected dists. Avoid
    # writing repeatedly in edge cases (e.g. when we write after 100
    # successfully processed and then have to keep writing for every skip that
    # occurs after that.
    progress = n_inspected + n_successfully_processed * 100
    if progress > last_wrote_at + 10000:
      last_wrote_at = progress
      logger.info("Writing early.")
      depdata.write_data_to_files([conflict_model])

    # The skip conditions.

    # If dist is in the blacklist for the same version of python we're running.
    blacklisted = distkey in depdata.blacklist \
        and sys.version_info.major in depdata.blacklist[distkey]

    # If dist has conflict info saved already
    already_in_conflicts = distkey in depdata.conflicts_db

    # Do we have dep info for the dist? Not a skip condition, but part of
    # careful_skip tests.
    already_in_dependencies = distkey in depdata.dependencies_by_dist

    # If we're not in no_skip mode, perform the skip checks.
    # Skip checks. If the dist is blacklisted or we already have dependency
    # data, then skip it - unless we're in careful skip mode and we don't
    # have dependency data for the dist.
    if not no_skip and (blacklisted or already_in_conflicts):
      # If dist isn't blacklisted, we already have conflict info, there's no
      # dependency info, and careful skip is on, don't actually skip.
      if careful_skip and not already_in_dependencies and not blacklisted:
        print('--- Not skipping ' + distkey + ': ' +
            'Already have conflict data, however there is no dependency info '
            'for the dist, the dist is not blacklisted, and we are in '
            'careful_skip mode.')
      else: # Skip, since we don't have a reason not to.
        n_inspected += 1
        print('--- SKIP -- ' + distkey + ': ' +
            'Blacklisted. '*blacklisted +
            'Already have conflict data. '*already_in_conflicts +
            '(Finished ' + str(n_inspected) + ' out of ' +
            str(len(distkeys_to_inspect)) + ')')
        continue

    # If we didn't skip, process the dist.
    packagename = depdata.get_packname(distkey)
    version_string = depdata.get_version(distkey)
    #assert(distkey.rfind(')') == len(distkey) - 1)
    formatted_requirement = packagename + "==" + version_string
    exitcode = None
    assert(conflict_model in [1, 2, 3])

    # Construct the argument list.
    # Include argument to pass to pip to tell it not to prod users about our
    # strange pip version (lest they follow that instruction and install a
    # standard pip version):
    pip_arglist = [
      'install',
      '-d', TEMPDIR_FOR_DOWNLOADED_DISTROS,
      '--disable-pip-version-check',
      '--find-dep-conflicts', str(conflict_model),
      '--quiet']

    if use_local_index:
      pip_arglist.extend(['-i', BANDERSNATCH_MIRROR_INDEX_DIR])

    pip_arglist.append(formatted_requirement)

    # With arg list constructed, call pip.main with it to run a modified pip
    # install attempt (will not install).
    # This assumes that we're dealing with my pip fork version 8.0.0.dev0seb).
    print('--- Sending ' + distkey + ' to pip.')
    logger.debug('Scraper says: before pip call, len(deps) is ' +
        str(len(depdata.dependencies_by_dist)))

    # Call pip, with a 5 minute timeout.
    exitcode = None # scoping paranoia
    try:
      exitcode = _call_pip_with_timeout(pip_arglist)
    except timeout.TimeoutException as e: # This catch is not likely. See below
      logger.warning('pip timed out on dist ' + distkey + '(5min)!'
          ' Will treat as error. Exception follows: ' + str(e.args))
      # Set the exit code to something other than 2 or 0 and it'll be treated
      # like any old pip error below, resulting in a blacklist.
      exitcode = 1000

    # However, unfortunately, we cannot assume that pip will let that exception
    # pass up to us. It seems to take the signal, stop and clean up, and then
    # return exit code 2. This is fine, except that then we can't really
    # blacklist the process. I'd have to add a timer here, detect something
    # very close to the timeout, and guess that it timed out. /: That sucks.
    # In any case, we'll not learn that it's a process that times out, but
    # we'll just look at it as a possible conflict case. (The data recorded
    # will not list it as a conflict. Hopefully, that data is not corrupted.
    # It's unlikely that it would have been, though, so I judge this OK.)

    # Process the output of the pip command.
    if exitcode == 2:
      print('--- X SDist ' + distkey + ' : pip errored out (code=' +
          str(exitcode) + '). Possible DEPENDENCY CONFLICT. Result recorded in '
          'conflicts_<...>.json. (Finished ' + str(n_inspected) + ' out of ' +
          str(len(distkeys_to_inspect)) + ')')
    elif exitcode == 0:
      print('--- . SDist ' + distkey + ' : pip completed successfully. '
          'No dependency conflicts observed. (Finished ' + str(n_inspected) +
          ' out of ' + str(len(distkeys_to_inspect)) + ')')
    else:
      print('--- . SDist ' + distkey + ': pip errored out (code=' +
          str(exitcode) + '), but it seems to have been unrelated to any dep '
          'conflict.... (Finished ' + str(n_inspected) + ' out of ' +
          str(len(distkeys_to_inspect)) + ')')
      # Store in the list of failing packages along with the python version
      # we're running. (sys.version_info.major yields int 2 or 3)
      # Contents are to eventually be a list of the major versions in which it
      # fails. We should never get here if the dist is already in the blacklist
      # for this version of python, but let's keep going even if so.
      if distkey in depdata.blacklist and sys.version_info.major in \
          depdata.blacklist[distkey] and not no_skip:
        logger.warning(' WARNING! This should not happen! ' + distkey + ' was'
            'already in the blacklist for python ' + str(sys.version_info.major)
            + ', thus it should not have been run unless we have --noskip on '
            '(which it is not)!')
      else:
        # Either the dist is not in the blacklist or it's not in the blacklist
        # for this version of python. (Sensible)
        if distkey not in depdata.blacklist:
          depdata.blacklist[distkey] = [sys.version_info.major]
          logger.info(" Added entry to blacklist for " + distkey)
        else:
          assert(no_skip or sys.version_info.major not in
              depdata.blacklist[distkey])
          depdata.blacklist[distkey].append(sys.version_info.major)
          logger.info(" Added additional entry to blacklist for " + distkey)

    # end of exit code processing
    n_inspected += 1
    n_successfully_processed += 1

  # end of for each tarfile/sdist

  # We're done with all packages. Write the collected data back to file.
  logger.debug("Writing.")
  depdata.write_data_to_files([conflict_model])
def install_and_report(solution, local=False, dir_pip=None,
    ignore_setuptools=True):
  """
  Accepts a list of distkeys indicating what distributions to try to install
  using pip.

  Arguments:
    - solution: a list of distkeys indicating what distributions to install
    - local (optional):
        - if not provided, we connect to PyPI
        - if simply set to 'True', we use the default local bandersnatch
          location for the simple listing of packages,
          'file:///srv/pypi/web/simple'.
        - if another value is provided, we interpret it as a string indicating
          the location of the simple index listing of packages on the mirror
          to use.
    - dir_pip (optional): If a special version of pip is to be used, this is
      the directory where it resides. If left as None, we simply use the
      default version of pip installed with virtualenv. If provided, we'll
      use 'pip install -e .' to install it.
    - ignore_setuptools (optional): Default True. If True, when testing the
      given solution to see that it was installed, ignores distributions of
      packages setuptools, pip, and wheel, which would already be in any
      virtual environment anyway.

  Returns:
    - success: True if all distkeys in the solution were actually installed
      in the virtual environment, else False.
    - venv: The directory containing the virtual environment created for this
      install.
    - stderr_installation: stderr.read().decode() for the pip install
      subprocess command that installed the distribution. In case of install
      errors. Empty string if stderr was empty.

  Raises:
    - UnrelatedInstallFailure if the installation fails in some trivial way
      that merits trying again (e.g. virtual environment creation fails
      repeatedly).

  Steps:
     1. Sets up a random-name new virtual environment
     2. Installs the given distributions using pip
  """
  # Argument processing: Sanitize solution set distkeys.
  try:
    solution = [depdata.normalize_distkey(distkey) for distkey in solution]
  except Exception:
    logger.error('Unable to sanitize distkeys in the solution provided. '
        'Solution: ' + str(solution))
    raise

  ###############
  # Step 1:
  # Create the virtual environment (venv) and validate it by trying to source
  # it. Sometimes this goes wrong. I don't know why yet.
  #
  # Fix: the previous retry loop never recorded a successful retry made in its
  # except clause (the success flag stayed False), so it would loop and create
  # yet another venv, abandoning the one just made, and was not actually
  # bounded. We now make up to three bounded attempts, stopping at the first
  # success and re-raising after the third consecutive failure.
  venv_dir = None
  for attempt in range(3):
    logger.info('Trying to create new virtual environment.')
    try:
      venv_dir = create_venv()
      break # Created successfully; stop retrying.
    except UnrelatedInstallFailure:
      if attempt == 2:
        raise # Three failures in a row; give up and let the caller retry.
      logger.error('Failed to create virtual environment. Trying once more.')

  logger.info('Successfully created virtual environment: ' + venv_dir)

  # To use it, we'll have to source it before any other command.
  cmd_sourcevenv = get_source_venv_cmd_str(venv_dir)

  # If we've been given a specific pip to install, we use that instead of
  # leaving the default.
  if dir_pip is not None:
    logger.info('Instructed to use custom pip @ ' + dir_pip + '. Installing '
        'that custom pip version into venv ' + venv_dir)
    cmd_install_custom_pip = cmd_sourcevenv + '; cd ' + dir_pip + \
        '; pip install -e . --disable-pip-version-check'
    stdout, stderr = popen_wrapper(cmd_install_custom_pip)
    # Consider checking for failure here.
    #cmd_check_pip_ver = cmd_sourcevenv + '; pip --version'

  # The actual installation.
  logger.info('Starting installation in venv ' + venv_dir + ' of solution '
      'set: ' + str(solution))
  stdout_install, stderr_install = install_into_venv(solution, venv_dir, local)

  # Determine what actually got installed (excluding default stuff like pip
  # and setuptools and wheel).
  installed_distkeys = get_dists_installed_in_venv(venv_dir)

  # Determine if the full set of things directed to be installed was actually
  # installed.
  # Fix: honor the documented ignore_setuptools parameter (previously accepted
  # but unused). With the default True, behavior matches the old code: dists
  # of packages present in every venv (pip, setuptools, wheel) are never
  # counted as missing. With False, they are checked like any other dist.
  dists_missing = []
  for distkey in solution:
    if ignore_setuptools and \
        depdata.get_packname(distkey) in PACKAGES_IN_ALL_VENVS:
      continue
    if distkey not in installed_distkeys:
      logger.info('Missing distkey from solution: ' + distkey)
      dists_missing.append(distkey)

  # Return:
  #  - whether or not the distkeys were all installed
  #  - where the new virtual environment with everything installed is
  #  - error string if there was an error
  return not dists_missing, venv_dir, stderr_install
def main():
  """
  Choose some conflicting dists to test rbtcollins solver on.

  Command-line flags: --noskip (re-solve even if results exist), --local /
  --local=<index>, --n=N (number of random conflicting dists), --all (solve
  every known model-3 conflicting dist, implies --local); any other argument
  is treated as a distkey to solve.

  Steps:
     1. Load dependency data.
     2. Call rbttest to solve using rbtcollins' pip branch issue-988 and
        determine the correctness of that solution.
     3. Write all the solution sets and correctness info to a json file.
  """
  # Create virtual environments directory if it doesn't exist.
  if not os.path.exists(VENVS_DIR):
    os.makedirs(VENVS_DIR)

  distkeys_to_solve = []

  n_distkeys = 3 # default 3, overriden by argument --n=, or specific distkeys

  args = sys.argv[1:]
  noskip = False
  local = False
  all_conflicting = False

  if args:
    for arg in args:
      if arg == '--noskip':
        noskip = True
      elif arg == '--local':
        local = True
      elif arg.startswith('--local='):
        # Everything after '--local=' is the index location to use.
        local = arg[8:]
      elif arg.startswith('--n='):
        n_distkeys = int(arg[4:])
      elif arg == '--all':
        local = True
        all_conflicting = True
      else:
        try:
          distkeys_to_solve.append(depdata.normalize_distkey(arg))
        except Exception:
          print('Unable to normalize provided argument as distkey: ' +
              str(arg) + '. Please provide correct arguments.')
          raise

  # If we didn't get any specific distkeys to solve for from the args, then
  # pick randomly:
  if distkeys_to_solve:
    n_distkeys = len(distkeys_to_solve)

  else: # not distkeys_to_solve:
    # Randomize from the model 3 conflict list.
    depdata.ensure_data_loaded()
    con3 = depdata.conflicts_3_db
    conflicting = [depdata.normalize_distkey(d) for d in con3 if con3[d]]
    if all_conflicting:
      distkeys_to_solve = conflicting
    else:
      import random
      for i in range(0, n_distkeys):
        distkeys_to_solve.append(random.choice(conflicting))

  ###############
  # Step 1: Load dependency data.
  # Load dependencies from their json file, harvested in prior full run of
  # scraper.
  depdata.ensure_data_loaded(include_edeps=True)
  edeps = depdata.elaborated_dependencies # potentially stale!
  versions = depdata.versions_by_package # potentially stale!

  # Prepare solution dictionary: start from any previously saved results.
  solution_dict = depdata.load_json_db(SOLUTIONS_JSON_FNAME)

  ###############
  # Step 2: Run rbttest to solve and test solution.
  try:
    for distkey in distkeys_to_solve:

      if not noskip and distkey in solution_dict:
        logger.info('Skipping rbt solve for ' + distkey + ' (already have '
            'results).')
        continue

      # Try-scoping paranoia.
      installed = None
      satisfied = None
      solution = None
      errstring = None
      stderr_installation = None

      try:
        # Explicit with multiple variables for clarity for the reader.
        (installed, satisfied, solution, errstring, stderr_installation) = \
            rbttest(distkey, edeps, versions, local)
        solution_dict[distkey] = (installed, satisfied, solution, errstring,
            stderr_installation)

      except UnrelatedInstallFailure as e:
        # Installation failed in some trivial way and should be retried once.
        # For example, virtual environment creation failed.
        (installed, satisfied, solution, errstring, stderr_installation) = \
            rbttest(distkey, edeps, versions, local)
        solution_dict[distkey] = (installed, satisfied, solution, errstring,
            stderr_installation)

  except:
    # Deliberately broad (catches KeyboardInterrupt too): whatever goes wrong,
    # fall through to the finally block below and save results gathered so far.
    print('Encountered ERROR. Saving solutions to file and halting. Error:')
    traceback.print_exc()

  ###############
  # Step 3: Dump solutions and solution correctness info to file.
  finally:
    print('Writing solutions gathered to ' + SOLUTIONS_JSON_FNAME)
    try:
      # Fix: use a context manager so the output file is flushed and closed
      # even if json.dump raises partway through serialization (the old code
      # passed an unclosed open() handle and relied on GC to flush it).
      with open(SOLUTIONS_JSON_FNAME, 'w') as fobj:
        json.dump(solution_dict, fobj)
    except:
      import ipdb
      ipdb.set_trace()
      print('Tried to write gathered solutions to file, but failed to write.'
          'Entering debug mode to allow data recovery.')
def install_and_report(solution, local=False, dir_pip=None,
    ignore_setuptools=True):
  """
  Accepts a list of distkeys indicating what distributions to try to install
  using pip.

  Arguments:
    - solution: a list of distkeys indicating what distributions to install
    - local (optional):
        - if not provided, we connect to PyPI
        - if simply set to 'True', we use the default local bandersnatch
          location for the simple listing of packages,
          'file:///srv/pypi/web/simple'.
        - if another value is provided, we interpret it as a string indicating
          the location of the simple index listing of packages on the mirror
          to use.
    - dir_pip (optional): If a special version of pip is to be used, this is
      the directory where it resides. If left as None, we simply use the
      default version of pip installed with virtualenv. If provided, we'll
      use 'pip install -e .' to install it.
    - ignore_setuptools (optional): Default True. If True, when testing the
      given solution to see that it was installed, ignores distributions of
      packages setuptools, pip, and wheel, which would already be in any
      virtual environment anyway.

  Returns:
    - success: True if all distkeys in the solution were actually installed
      in the virtual environment, else False.
    - venv: The directory containing the virtual environment created for this
      install.
    - stderr_installation: stderr.read().decode() for the pip install
      subprocess command that installed the distribution. In case of install
      errors. Empty string if stderr was empty.

  Raises:
    - UnrelatedInstallFailure if the installation fails in some trivial way
      that merits trying again (e.g. virtual environment creation fails
      repeatedly).

  Steps:
     1. Sets up a random-name new virtual environment
     2. Installs the given distributions using pip
  """
  # Argument processing: Sanitize solution set distkeys.
  try:
    solution = [depdata.normalize_distkey(distkey) for distkey in solution]
  except Exception:
    logger.error('Unable to sanitize distkeys in the solution provided. '
        'Solution: ' + str(solution))
    raise

  ###############
  # Step 1:
  # Create the virtual environment (venv) and validate it by trying to source
  # it. Sometimes this goes wrong. I don't know why yet.
  #
  # Fix: the previous retry loop never recorded a successful retry made in its
  # except clause (the success flag stayed False), so it would loop and create
  # yet another venv, abandoning the one just made, and was not actually
  # bounded. We now make up to three bounded attempts, stopping at the first
  # success and re-raising after the third consecutive failure.
  venv_dir = None
  for attempt in range(3):
    logger.info('Trying to create new virtual environment.')
    try:
      venv_dir = create_venv()
      break # Created successfully; stop retrying.
    except UnrelatedInstallFailure:
      if attempt == 2:
        raise # Three failures in a row; give up and let the caller retry.
      logger.error(
          'Failed to create virtual environment. Trying once more.')

  logger.info('Successfully created virtual environment: ' + venv_dir)

  # To use it, we'll have to source it before any other command.
  cmd_sourcevenv = get_source_venv_cmd_str(venv_dir)

  # If we've been given a specific pip to install, we use that instead of
  # leaving the default.
  if dir_pip is not None:
    logger.info('Instructed to use custom pip @ ' + dir_pip + '. Installing '
        'that custom pip version into venv ' + venv_dir)
    cmd_install_custom_pip = cmd_sourcevenv + '; cd ' + dir_pip + \
        '; pip install -e . --disable-pip-version-check'
    stdout, stderr = popen_wrapper(cmd_install_custom_pip)
    # Consider checking for failure here.
    #cmd_check_pip_ver = cmd_sourcevenv + '; pip --version'

  # The actual installation.
  logger.info('Starting installation in venv ' + venv_dir + ' of solution '
      'set: ' + str(solution))
  stdout_install, stderr_install = install_into_venv(solution, venv_dir, local)

  # Determine what actually got installed (excluding default stuff like pip
  # and setuptools and wheel).
  installed_distkeys = get_dists_installed_in_venv(venv_dir)

  # Determine if the full set of things directed to be installed was actually
  # installed.
  # Fix: honor the documented ignore_setuptools parameter (previously accepted
  # but unused). With the default True, behavior matches the old code: dists
  # of packages present in every venv (pip, setuptools, wheel) are never
  # counted as missing. With False, they are checked like any other dist.
  dists_missing = []
  for distkey in solution:
    if ignore_setuptools and \
        depdata.get_packname(distkey) in PACKAGES_IN_ALL_VENVS:
      continue
    if distkey not in installed_distkeys:
      logger.info('Missing distkey from solution: ' + distkey)
      dists_missing.append(distkey)

  # Return:
  #  - whether or not the distkeys were all installed
  #  - where the new virtual environment with everything installed is
  #  - error string if there was an error
  return not dists_missing, venv_dir, stderr_install
def rbttest(distkey, edeps, versions, local=False, dir_rbt_pip='../pipcollins'):
  """
  Accepts a distkey indicating what distribution to try to install using
  rbtcollins' issue-988 pip branch as a way to solve conflicts.

  Steps:
     1. Solve using rbtcollins' pip branch issue-988: For each distkey, create
        a new virtual environment, install rbtcollins' pip version within it,
        use it to install the dist indicated, and use `pip list` to determine
        what it installed.
     2. Run resolvability.are_fully_satisfied to test the solution set for
        consistency.

  Arguments:
    - distkey: the distkey indicating what distribution to solve for
    - edeps: elaborated dependency data (see depdata.py)
    - versions: versions by package (dict mapping package name to the
      available versions for that package)
    - local (optional):
        - if not provided, we connect to PyPI
        - if simply set to 'True', we use the default local bandersnatch
          location for the simple listing of packages,
          'file:///srv/pypi/web/simple'.
        - if another value is provided, we interpret it as a string indicating
          the location of the simple index listing of packages on the mirror
          to use.
    - dir_rbt_pip (optional): directory where rbtcollins' pip branch resides.
      NOTE(review): not referenced in this function body; presumably consumed
      by rbt_backtracking_satisfy elsewhere - confirm before removing.

  Returns:
    - Installed: True if distkey was able to be installed with rbt pip
    - Satisfied: True if the solution set rbt pip generated was fully
      satisfied, i.e. all dependencies of the given dist were satisfied, along
      with all of their dependencies, and so on, with no dependency conflicts.
      (May also be '' - falsy but not False - if satisfiability could not be
      determined due to missing dependency info.)
    - Solution: the solution set (all distkeys installed)
    - errstring: a string describing the error encountered if any was
      encountered.
    - stderr_installation: stderr.read().decode() for the pip install
      subprocess command that installed the distribution. In case of install
      errors. Empty string if stderr was empty.

  Raises:
    - UnrelatedInstallFailure if the installation fails in some trivial way
      that merits trying again.
  """
  errstring = ''

  # Sanitize distkey:
  unsanitized_distkey = distkey # storing only for debug
  distkey = depdata.normalize_distkey(distkey)
  logger.debug('Sanitizing distkey: from ' + unsanitized_distkey + ' to ' +
      distkey)

  ###############
  # Step 1
  # Figure out the install candidate solution.
  logger.info('Starting rbt resolve of ' + distkey)

  # declaring here for try/except scope reasons;
  # declaring two for distkey normalization & debug
  unsanitized_solution = []
  solution = []
  # will be whatever is printed to stderr during the installation of the
  # distribution using rbtpip:
  stderr_installation = ''

  # Run rbtcollins' pip branch to find the solution, with some acrobatics.
  try:
    (unsanitized_solution, stderr_installation) = \
        rbt_backtracking_satisfy(distkey, edeps, versions, local)
  except depresolve._external.timeout.TimeoutException as e:
    errstring = 'Timed out during install'
    logger.error('Unable to install ' + distkey + ' using rbt pip. ' +
        errstring)
    return (False, False, [], errstring, stderr_installation)
  except UnrelatedInstallFailure as e:
    # Expect this to just be retried immediately.
    raise
    #errstring = e.msg # Failed to get through the early stages of the install.
    #return (False, False, [], errstring, stderr_installation)

  # Sanitize solution, which may in particular have non-lowercase names:
  for sol_distkey in unsanitized_solution:
    solution.append(depdata.normalize_distkey(sol_distkey))

  ###############
  # Step 2: Run resolvability.are_fully_satisfied to test the solution set
  # for consistency.

  # Test the solution.
  # If the given solution doesn't even include the distribution to install
  # itself, it's obviously not been successful.
  satisfied = False
  installed = distkey in [d.lower() for d in solution] # sanitize old data

  if not installed:
    # Classify the failure by matching known marker strings in rbt pip's
    # stderr output.
    if 'Hit step limit during requirement resolving.' in stderr_installation:
      errstring = 'Hit step limit during requirement resolving.'
      logger.error('Unable to install ' + distkey + ' using rbt pip. ' +
          errstring + '. Solution does not contain ' + distkey + '. Solution '
          'was: ' + str(solution))
    elif 'Timed out' in stderr_installation:
      errstring = 'Timed out: >5min install'
      logger.error('Unable to install ' + distkey + ' using rbt pip. ' +
          errstring + '. Solution does not contain ' + distkey + '. Solution '
          'was: ' + str(solution))
    elif solution:
      errstring = 'Non-empty solution without target distkey'
      logger.error(
          'Unable to install ' + distkey + ' using rbt pip. ' + errstring +
          '. Solution does not contain ' + distkey + '. Presume '
          'failure; unclear why anything was installed at all - possibly '
          'failure in middle of installations, after some dependencies were '
          'installed? Solution was: ' + str(solution))
    else:
      errstring = 'Empty solution, reason unknown.'
      logger.error('Unable to install ' + distkey + ' using rbt pip. ' +
          errstring + '. Presume pip failure.')

  else:
    # If it's in there, then we check to see if the solution is fully
    # satisfied. (Note that because virtual environments start off with pip,
    # wheel, and setuptools, we can't tell when a solution includes them,
    # don't store those as part of the solution, and so disregard them in this
    # dependency check. ):
    try:
      satisfied = ry.are_fully_satisfied(solution, edeps, versions,
          disregard_setuptools=True)

    except depresolve.MissingDependencyInfoError as e:
      errstring = 'Unable to determine if satisfied: missing dep info for ' + \
          str(e.args[1])
      satisfied = '' # evaluates False but is not False
      logger.error(errstring + '. Resolution for ' + distkey + ' unknown. ' +
          ' Full exception:' + str(e))

  # Use venv catalog to print debug info. Clunky.
  # Lazily loads the module-global venv_catalog on first use.
  # NOTE(review): venv_catalog[distkey] raises KeyError if the catalog has no
  # entry for this dist - confirm the catalog is always populated by the
  # install path before this point.
  global venv_catalog
  if venv_catalog is None:
    venv_catalog = depdata.load_json_db(VENV_CATALOG_JSON_FNAME)
  logger.info('Tried solving ' + distkey + ' using rbtcollins pip patch. '
      'Installed: ' + str(installed) + '. Satisfied: ' + str(satisfied) +
      ' virtualenv used: ' + venv_catalog[distkey])

  # Return the solution that rbt generates for this distkey:
  #  - whether or not the distkey itself was installed
  #  - whether or not the install set is fully satisfied and conflict-less
  #  - what the solution set is
  #  - error string if there was an error
  return (installed, satisfied, solution, errstring, stderr_installation)