def test_try_acquire_locks(testing_workdir):
    # Acquiring two unlocked locks should succeed.
    lock1 = filelock.FileLock(os.path.join(testing_workdir, 'lock1'))
    lock2 = filelock.FileLock(os.path.join(testing_workdir, 'lock2'))
    with utils.try_acquire_locks([lock1, lock2], timeout=1):
        pass

    # Acquiring the same lock twice should fail.
    lock1_copy = filelock.FileLock(os.path.join(testing_workdir, 'lock1'))
    # Also verify that the error message contains the word "lock", since we rely
    # on this elsewhere.
    with pytest.raises(BuildLockError, match='Failed to acquire all locks'):
        with utils.try_acquire_locks([lock1, lock1_copy], timeout=1):
            pass
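For reference, these tests pin down the contract of utils.try_acquire_locks: it is an all-or-nothing context manager that must hold every lock in the list before yielding, and it raises BuildLockError('Failed to acquire all locks') when it cannot do so within the timeout. A minimal sketch consistent with that contract (an illustration, not the actual conda-build implementation):

import contextlib
import time

import filelock


class BuildLockError(Exception):
    """Raised when the full set of locks cannot be acquired in time."""


@contextlib.contextmanager
def try_acquire_locks(locks, timeout):
    # All-or-nothing: hold every lock before yielding, never a partial set.
    deadline = time.time() + timeout
    acquired = []
    while True:
        try:
            for lock in locks:
                lock.acquire(timeout=0.1)
                acquired.append(lock)
            break
        except filelock.Timeout:
            # Another process holds one of the locks: release everything we
            # grabbed and retry, so we never sit on a partial set of locks.
            for lock in acquired:
                lock.release()
            acquired = []
            if time.time() >= deadline:
                raise BuildLockError('Failed to acquire all locks')
    try:
        yield
    finally:
        for lock in acquired:
            lock.release()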
def get_install_actions(prefix, index, specs, config, retries=0):
    log = utils.get_logger(__name__)
    if config.verbose:
        capture = contextlib.contextmanager(lambda: (yield))
    else:
        capture = utils.capture
    actions = {'LINK': []}
    specs = [_ensure_valid_spec(spec) for spec in specs]
    if specs:
        # this is hiding output like:
        #    Fetching package metadata ...........
        #    Solving package specifications: ..........
        with capture():
            try:
                actions = plan.install_actions(prefix, index, specs)
            except NoPackagesFoundError as exc:
                raise DependencyNeedsBuildingError(exc)
            except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,
                    CondaError, AssertionError) as exc:
                if 'lock' in str(exc):
                    log.warn("failed to get install actions, retrying. exception was: %s",
                             str(exc))
                elif ('requires a minimum conda version' in str(exc) or
                        'link a source that does not' in str(exc) or
                        isinstance(exc, AssertionError)):
                    locks = utils.get_conda_operation_locks(config)
                    with utils.try_acquire_locks(locks, timeout=config.timeout):
                        pkg_dir = str(exc)
                        folder = 0
                        while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                            pkg_dir = os.path.dirname(pkg_dir)
                            folder += 1
                        log.warn("I think conda ended up with a partial extraction for %s. "
                                 "Removing the folder and retrying", pkg_dir)
                        if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):
                            utils.rm_rf(pkg_dir)
                if retries < config.max_env_retry:
                    log.warn("failed to get install actions, retrying. exception was: %s",
                             str(exc))
                    actions = get_install_actions(prefix, index, specs, config,
                                                  retries=retries + 1)
                else:
                    log.error("Failed to get install actions, max retries exceeded.")
                    raise
    if config.disable_pip:
        actions['LINK'] = [spec for spec in actions['LINK']
                           if not spec.startswith('pip-') and
                           not spec.startswith('setuptools-')]
    return actions
def remove_existing_packages(dirs, fns, config):
    locks = get_pkg_dirs_locks(dirs, config) if config.locking else []

    with utils.try_acquire_locks(locks, timeout=config.timeout):
        for folder in dirs:
            for fn in fns:
                all_files = [fn]
                if not os.path.isabs(fn):
                    all_files = glob(os.path.join(folder, fn + '*'))
                for entry in all_files:
                    utils.rm_rf(entry)
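get_pkg_dirs_locks is referenced here and in clean_pkg_cache below but is not shown in this section. Judging from the per-folder utils.get_lock(...) calls in the locking variants of clean_pkg_cache, it plausibly builds one lock per directory; a hypothetical sketch:

def get_pkg_dirs_locks(dirs, config):
    # Hypothetical helper (not shown in this section): one file lock per
    # package directory, mirroring the per-folder utils.get_lock(...) calls
    # used by the locking clean_pkg_cache variants below.
    return [utils.get_lock(folder, timeout=config.timeout) for folder in dirs]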
def write_repodata(repodata, dir_path, lock, locking=True, timeout=90):
    """ Write updated repodata.json and repodata.json.bz2 """
    locks = []
    if locking:
        locks = [lock]
    with try_acquire_locks(locks, timeout):
        data = json.dumps(repodata, indent=2, sort_keys=True)
        # strip trailing whitespace
        data = '\n'.join(line.rstrip() for line in data.splitlines())
        # make sure we have newline at the end
        if not data.endswith('\n'):
            data += '\n'
        with open(join(dir_path, 'repodata.json'), 'w') as fo:
            fo.write(data)
        with open(join(dir_path, 'repodata.json.bz2'), 'wb') as fo:
            fo.write(bz2.compress(data.encode('utf-8')))
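A possible call site, mirroring how the update_index variants below invoke it (dir_path and index stand in for the caller's values):

lock = get_lock(dir_path)  # per-directory lock, as in update_index below
repodata = {'packages': index, 'info': {}}
write_repodata(repodata, dir_path, lock=lock, locking=True, timeout=90)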
def read_index_tar(tar_path, config, lock):
    """ Returns the index.json dict inside the given package tarball. """
    locks = [lock] if config.locking else []
    with try_acquire_locks(locks, config.timeout):
        with tarfile.open(tar_path) as t:
            try:
                return json.loads(t.extractfile('info/index.json').read().decode('utf-8'))
            except EOFError:
                raise RuntimeError("Could not extract %s. File probably corrupt."
                                   % tar_path)
            except OSError as e:
                raise RuntimeError("Could not extract %s (%s)" % (tar_path, e))
            except tarfile.ReadError:
                raise RuntimeError("Could not extract metadata from %s. "
                                   "File probably corrupt." % tar_path)
def clean_pkg_cache(dist, config):
    locks = []

    conda_log_level = logging.WARN
    if config.debug:
        conda_log_level = logging.DEBUG

    _pkgs_dirs = pkgs_dirs[:1]
    if config.locking:
        locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]
    with utils.LoggingContext(conda_log_level):
        with utils.try_acquire_locks(locks, timeout=config.timeout):
            rmplan = [
                'RM_EXTRACTED {0} local::{0}'.format(dist),
                'RM_FETCHED {0} local::{0}'.format(dist),
            ]
            execute_plan(rmplan)

            # Conda does not seem to do a complete cleanup sometimes. This is supplemental.
            #   Conda's cleanup is still necessary - it keeps track of its own in-memory
            #   list of downloaded things.
            for folder in pkgs_dirs:
                try:
                    assert not os.path.exists(os.path.join(folder, dist))
                    assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))
                    for pkg_id in [dist, 'local::' + dist]:
                        assert pkg_id not in package_cache()
                except AssertionError:
                    log = utils.get_logger(__name__)
                    log.debug("Conda caching error: %s package remains in cache after removal",
                              dist)
                    log.debug("manually removing to compensate")
                    cache = package_cache()
                    keys = [key for key in cache.keys() if dist in key]
                    for pkg_id in keys:
                        if pkg_id in cache:
                            del cache[pkg_id]
                    for entry in glob(os.path.join(folder, dist + '*')):
                        utils.rm_rf(entry)
def clean_pkg_cache(dist, config):
    locks = []

    conda_log_level = logging.WARN
    if config.debug:
        conda_log_level = logging.DEBUG

    with utils.LoggingContext(conda_log_level):
        locks = get_pkg_dirs_locks([config.bldpkgs_dir] + pkgs_dirs, config)
        with utils.try_acquire_locks(locks, timeout=config.timeout):
            rmplan = [
                'RM_EXTRACTED {0} local::{0}'.format(dist),
                'RM_FETCHED {0} local::{0}'.format(dist),
            ]
            execute_plan(rmplan)

            # Conda does not seem to do a complete cleanup sometimes. This is supplemental.
            #   Conda's cleanup is still necessary - it keeps track of its own in-memory
            #   list of downloaded things.
            for folder in pkgs_dirs:
                if (os.path.exists(os.path.join(folder, dist)) or
                        os.path.exists(os.path.join(folder, dist + '.tar.bz2')) or
                        any(pkg_id in package_cache()
                            for pkg_id in [dist, 'local::' + dist])):
                    log = utils.get_logger(__name__)
                    log.debug("Conda caching error: %s package remains in cache after removal",
                              dist)
                    log.debug("manually removing to compensate")
                    cache = package_cache()
                    keys = [key for key in cache.keys() if dist in key]
                    for pkg_id in keys:
                        if pkg_id in cache:
                            del cache[pkg_id]
                    for entry in glob(os.path.join(folder, dist + '*')):
                        utils.rm_rf(entry)

    # Note that this call acquires the relevant locks, so this must be called
    # outside the lock context above.
    remove_existing_packages(pkgs_dirs, [dist], config)
def get_install_actions(prefix, index, specs, config, retries=0, timestamp=0,
                        subdir=None):
    global _cached_install_actions
    global _last_timestamp
    log = utils.get_logger(__name__)
    if config.verbose:
        capture = contextlib.contextmanager(lambda: (yield))
    else:
        capture = utils.capture
    actions = {'LINK': []}
    for feature, value in feature_list:
        if value:
            specs.append('%s@' % feature)
    specs = tuple(_ensure_valid_spec(spec) for spec in specs)
    if (specs, subdir, timestamp) in _cached_install_actions and timestamp > _last_timestamp:
        actions = _cached_install_actions[(specs, subdir, timestamp)]
    else:
        if specs:
            # this is hiding output like:
            #    Fetching package metadata ...........
            #    Solving package specifications: ..........
            with capture():
                try:
                    actions = install_actions(prefix, index, specs, force=True)
                    # Experimenting with getting conda to create fewer Resolve objects
                    #    Experiment failed, seemingly due to conda's statefulness. Packages could
                    #    not be found.
                    # index_timestamp=timestamp)
                except NoPackagesFoundError as exc:
                    # Attempt to skeleton packages it can't find
                    packages = [x.split(" ")[0] for x in exc.pkgs]
                    for pkg in packages:
                        if pkg.startswith("r-"):
                            api.skeletonize([pkg], "cran")
                        else:
                            api.skeletonize([pkg], "pypi")
                    raise DependencyNeedsBuildingError(exc, subdir=subdir)
                except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,
                        CondaError, AssertionError) as exc:
                    if 'lock' in str(exc):
                        log.warn("failed to get install actions, retrying. exception was: %s",
                                 str(exc))
                    elif ('requires a minimum conda version' in str(exc) or
                            'link a source that does not' in str(exc) or
                            isinstance(exc, AssertionError)):
                        locks = utils.get_conda_operation_locks(config)
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = str(exc)
                            folder = 0
                            while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                                pkg_dir = os.path.dirname(pkg_dir)
                                folder += 1
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                    if retries < config.max_env_retry:
                        log.warn("failed to get install actions, retrying. exception was: %s",
                                 str(exc))
                        actions = get_install_actions(prefix, index, specs, config,
                                                      retries=retries + 1,
                                                      timestamp=timestamp)
                    else:
                        log.error("Failed to get install actions, max retries exceeded.")
                        raise
        if config.disable_pip:
            actions['LINK'] = [spec for spec in actions['LINK']
                               if not spec.startswith('pip-') and
                               not spec.startswith('setuptools-')]
        utils.trim_empty_keys(actions)
        _cached_install_actions[(specs, subdir, timestamp)] = actions
        _last_timestamp = timestamp
    return actions
def update_index(dir_path, force=False, check_md5=False, remove=True, lock=None,
                 could_be_mirror=True, verbose=True, locking=True, timeout=90):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """
    log = utils.get_logger(__name__)
    log.debug("updating index in: %s", dir_path)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    index_path = join(dir_path, '.index.json')

    if not lock:
        lock = get_lock(dir_path)

    locks = []
    if locking:
        locks.append(lock)

    index = {}
    with try_acquire_locks(locks, timeout):
        if not force:
            try:
                mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if verbose:
                print('updating:', fn)
            d = read_index_tar(path, lock=lock, locking=locking, timeout=timeout)
            d.update(file_info(path))
            index[fn] = d

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata, dir_path, lock=lock, locking=locking, timeout=timeout)
def update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,
                 could_be_mirror=True):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """
    log = utils.get_logger(__name__)
    log.debug("updating index in: %s", dir_path)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    index_path = join(dir_path, '.index.json')

    if not lock:
        lock = get_lock(dir_path)

    locks = []
    if config.locking:
        locks.append(lock)

    index = {}
    with try_acquire_locks(locks, config.timeout):
        if not force:
            try:
                mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        subdir = None
        files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
        if could_be_mirror and any(fn.startswith('_license-') for fn in files):
            sys.exit("""\
Error:
    Indexing a copy of the Anaconda conda package channel is neither
    necessary nor supported.  If you wish to add your own packages,
    you can do so by adding them to a separate channel.
""")
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if config.verbose:
                print('updating:', fn)
            d = read_index_tar(path, config, lock=lock)
            d.update(file_info(path))
            index[fn] = d
            # there's only one subdir for a given folder, so only read these contents once
            if not subdir:
                subdir = d['subdir']

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if config.verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata, dir_path, lock=lock, config=config)
def create_env(prefix, specs, config, subdir, clear_cache=True, retry=0,
               index=None, locks=None):
    '''
    Create a conda environment for the given prefix and specs.
    '''
    if config.debug:
        utils.get_logger("conda_build").setLevel(logging.DEBUG)
        external_logger_context = utils.LoggingContext(logging.DEBUG)
    else:
        utils.get_logger("conda_build").setLevel(logging.INFO)
        external_logger_context = utils.LoggingContext(logging.ERROR)

    with external_logger_context:
        log = utils.get_logger(__name__)

        if os.path.isdir(prefix):
            utils.rm_rf(prefix)

        specs = list(set(specs))
        for feature, value in feature_list:
            if value:
                specs.append('%s@' % feature)

        if specs:  # Don't waste time if there is nothing to do
            log.debug("Creating environment in %s", prefix)
            log.debug(str(specs))

            with utils.path_prepended(prefix):
                if not locks:
                    locks = utils.get_conda_operation_locks(config)
                try:
                    with utils.try_acquire_locks(locks, timeout=config.timeout):
                        if not index:
                            index = get_build_index(config=config, subdir=subdir)
                        actions = get_install_actions(prefix, index, specs, config)
                        plan.display_actions(actions, index)
                        if utils.on_win:
                            for k, v in os.environ.items():
                                os.environ[k] = str(v)
                        plan.execute_actions(actions, index, verbose=config.debug)
                except (SystemExit, PaddingError, LinkError,
                        DependencyNeedsBuildingError, CondaError) as exc:
                    if (("too short in" in str(exc) or
                            re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl',
                                      str(exc)) or
                            isinstance(exc, PaddingError)) and
                            config.prefix_length > 80):
                        if config.prefix_length_fallback:
                            log.warn("Build prefix failed with prefix length %d",
                                     config.prefix_length)
                            log.warn("Error was: ")
                            log.warn(str(exc))
                            log.warn("One or more of your package dependencies needs to be rebuilt "
                                     "with a longer prefix length.")
                            log.warn("Falling back to legacy prefix length of 80 characters.")
                            log.warn("Your package will not install into prefixes > 80 characters.")
                            config.prefix_length = 80

                            # Set this here and use to create environ
                            #   Setting this here is important because we use it below (symlink)
                            prefix = config.build_prefix

                            create_env(prefix, specs, config=config, subdir=subdir,
                                       clear_cache=clear_cache)
                        else:
                            raise
                    elif 'lock' in str(exc):
                        if retry < config.max_env_retry:
                            log.warn("failed to create env, retrying. exception was: %s",
                                     str(exc))
                            create_env(prefix, specs, config=config, subdir=subdir,
                                       clear_cache=clear_cache, retry=retry + 1)
                    elif ('requires a minimum conda version' in str(exc) or
                            'link a source that does not' in str(exc)):
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = str(exc)
                            folder = 0
                            while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                                pkg_dir = os.path.dirname(pkg_dir)
                                folder += 1
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                        if retry < config.max_env_retry:
                            log.warn("failed to create env, retrying. exception was: %s",
                                     str(exc))
                            create_env(prefix, specs, config=config, subdir=subdir,
                                       clear_cache=clear_cache, retry=retry + 1)
                        else:
                            log.error("Failed to create env, max retries exceeded.")
                            raise
                    else:
                        raise
                # HACK: some of the time, conda screws up somehow and incomplete packages result.
                #    Just retry.
                except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:
                    if isinstance(exc, AssertionError):
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = os.path.dirname(os.path.dirname(str(exc)))
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                    if retry < config.max_env_retry:
                        log.warn("failed to create env, retrying. exception was: %s", str(exc))
                        create_env(prefix, specs, config=config, subdir=subdir,
                                   clear_cache=clear_cache, retry=retry + 1)
                    else:
                        log.error("Failed to create env, max retries exceeded.")
                        raise

    if utils.on_win:
        shell = "cmd.exe"
    else:
        shell = "bash"
    symlink_conda(prefix, sys.prefix, shell)
def get_install_actions(prefix, specs, env, retries=0, subdir=None,
                        verbose=True, debug=False, locking=True,
                        bldpkgs_dirs=None, timeout=90, disable_pip=False,
                        max_env_retry=3, output_folder=None, channel_urls=None):
    global cached_actions
    global last_index_ts
    actions = {}
    log = utils.get_logger(__name__)
    conda_log_level = logging.WARN
    if verbose:
        capture = contextlib.contextmanager(lambda: (yield))
    elif debug:
        capture = contextlib.contextmanager(lambda: (yield))
        conda_log_level = logging.DEBUG
    else:
        capture = utils.capture
    for feature, value in feature_list:
        if value:
            specs.append('%s@' % feature)

    bldpkgs_dirs = ensure_list(bldpkgs_dirs)

    index, index_ts = get_build_index(subdir, list(bldpkgs_dirs)[0],
                                      output_folder=output_folder, channel_urls=channel_urls,
                                      debug=debug, verbose=verbose, locking=locking,
                                      timeout=timeout)
    specs = tuple(_ensure_valid_spec(spec) for spec in specs)

    if (specs, env, subdir, channel_urls) in cached_actions and last_index_ts >= index_ts:
        actions = cached_actions[(specs, env, subdir, channel_urls)].copy()
        if "PREFIX" in actions:
            actions['PREFIX'] = prefix
    elif specs:
        # this is hiding output like:
        #    Fetching package metadata ...........
        #    Solving package specifications: ..........
        with utils.LoggingContext(conda_log_level):
            with capture():
                try:
                    actions = install_actions(prefix, index, specs, force=True)
                except NoPackagesFoundError as exc:
                    raise DependencyNeedsBuildingError(exc, subdir=subdir)
                except (SystemExit, PaddingError, LinkError,
                        DependencyNeedsBuildingError, CondaError,
                        AssertionError) as exc:
                    if 'lock' in str(exc):
                        log.warn("failed to get install actions, retrying. exception was: %s",
                                 str(exc))
                    elif ('requires a minimum conda version' in str(exc) or
                            'link a source that does not' in str(exc) or
                            isinstance(exc, AssertionError)):
                        locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)
                        with utils.try_acquire_locks(locks, timeout=timeout):
                            pkg_dir = str(exc)
                            folder = 0
                            while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                                pkg_dir = os.path.dirname(pkg_dir)
                                folder += 1
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                    if retries < max_env_retry:
                        log.warn("failed to get install actions, retrying. exception was: %s",
                                 str(exc))
                        actions = get_install_actions(prefix, tuple(specs), env,
                                                      retries=retries + 1,
                                                      subdir=subdir,
                                                      verbose=verbose,
                                                      debug=debug,
                                                      locking=locking,
                                                      bldpkgs_dirs=tuple(bldpkgs_dirs),
                                                      timeout=timeout,
                                                      disable_pip=disable_pip,
                                                      max_env_retry=max_env_retry,
                                                      output_folder=output_folder,
                                                      channel_urls=tuple(channel_urls))
                    else:
                        log.error("Failed to get install actions, max retries exceeded.")
                        raise
        if disable_pip:
            actions['LINK'] = [spec for spec in actions['LINK']
                               if not spec.startswith('pip-') and
                               not spec.startswith('setuptools-')]
        utils.trim_empty_keys(actions)
        cached_actions[(specs, env, subdir, channel_urls)] = actions.copy()
        last_index_ts = index_ts
    return actions
def update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,
                 could_be_mirror=True):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """
    if config.verbose:
        print("updating index in:", dir_path)
    index_path = join(dir_path, '.index.json')
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    if not lock:
        lock = get_lock(dir_path)

    locks = [lock] if config.locking else []

    with try_acquire_locks(locks, config.timeout):
        if force:
            index = {}
        else:
            try:
                mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
        if could_be_mirror and any(fn.startswith('_license-') for fn in files):
            sys.exit("""\
Error:
    Indexing a copy of the Anaconda conda package channel is neither
    necessary nor supported.  If you wish to add your own packages,
    you can do so by adding them to a separate channel.
""")
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if config.verbose:
                print('updating:', fn)
            d = read_index_tar(path, config, lock=lock)
            d.update(file_info(path))
            index[fn] = d

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if config.verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata, dir_path, lock=lock, config=config)
def create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True,
               retry=0, locks=None, is_cross=False):
    '''
    Create a conda environment for the given prefix and specs.
    '''
    if config.debug:
        external_logger_context = utils.LoggingContext(logging.DEBUG)
    else:
        external_logger_context = utils.LoggingContext(logging.ERROR)

    with external_logger_context:
        log = utils.get_logger(__name__)

        # if os.path.isdir(prefix):
        #     utils.rm_rf(prefix)

        if specs_or_actions:  # Don't waste time if there is nothing to do
            log.debug("Creating environment in %s", prefix)
            log.debug(str(specs_or_actions))

            with utils.path_prepended(prefix):
                if not locks:
                    locks = utils.get_conda_operation_locks(config)
                try:
                    with utils.try_acquire_locks(locks, timeout=config.timeout):
                        # input is a list - it's specs in MatchSpec format
                        if not hasattr(specs_or_actions, 'keys'):
                            specs = list(set(specs_or_actions))
                            actions = get_install_actions(prefix, tuple(specs), env,
                                                          subdir=subdir,
                                                          verbose=config.verbose,
                                                          debug=config.debug,
                                                          locking=config.locking,
                                                          bldpkgs_dirs=tuple(config.bldpkgs_dirs),
                                                          timeout=config.timeout,
                                                          disable_pip=config.disable_pip,
                                                          max_env_retry=config.max_env_retry,
                                                          output_folder=config.output_folder,
                                                          channel_urls=tuple(config.channel_urls))
                        else:
                            actions = specs_or_actions
                        index, index_ts = get_build_index(subdir=subdir,
                                                          bldpkgs_dir=config.bldpkgs_dir,
                                                          output_folder=config.output_folder,
                                                          channel_urls=config.channel_urls,
                                                          debug=config.debug,
                                                          verbose=config.verbose,
                                                          locking=config.locking,
                                                          timeout=config.timeout)
                        utils.trim_empty_keys(actions)
                        display_actions(actions, index)
                        if utils.on_win:
                            for k, v in os.environ.items():
                                os.environ[k] = str(v)
                        execute_actions(actions, index, verbose=config.debug)
                except (SystemExit, PaddingError, LinkError,
                        DependencyNeedsBuildingError, CondaError) as exc:
                    if (("too short in" in str(exc) or
                            re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl',
                                      str(exc)) or
                            isinstance(exc, PaddingError)) and
                            config.prefix_length > 80):
                        if config.prefix_length_fallback:
                            log.warn("Build prefix failed with prefix length %d",
                                     config.prefix_length)
                            log.warn("Error was: ")
                            log.warn(str(exc))
                            log.warn("One or more of your package dependencies needs to be rebuilt "
                                     "with a longer prefix length.")
                            log.warn("Falling back to legacy prefix length of 80 characters.")
                            log.warn("Your package will not install into prefixes > 80 characters.")
                            config.prefix_length = 80

                            # Set this here and use to create environ
                            #   Setting this here is important because we use it below (symlink)
                            prefix = config.build_prefix
                            actions['PREFIX'] = prefix

                            create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                       clear_cache=clear_cache, is_cross=is_cross)
                        else:
                            raise
                    elif 'lock' in str(exc):
                        if retry < config.max_env_retry:
                            log.warn("failed to create env, retrying. exception was: %s",
                                     str(exc))
                            create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                       clear_cache=clear_cache, retry=retry + 1,
                                       is_cross=is_cross)
                    elif ('requires a minimum conda version' in str(exc) or
                            'link a source that does not' in str(exc)):
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = str(exc)
                            folder = 0
                            while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                                pkg_dir = os.path.dirname(pkg_dir)
                                folder += 1
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                        if retry < config.max_env_retry:
                            log.warn("failed to create env, retrying. exception was: %s",
                                     str(exc))
                            create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                       clear_cache=clear_cache, retry=retry + 1,
                                       is_cross=is_cross)
                        else:
                            log.error("Failed to create env, max retries exceeded.")
                            raise
                    else:
                        raise
                # HACK: some of the time, conda screws up somehow and incomplete packages result.
                #    Just retry.
                except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:
                    if isinstance(exc, AssertionError):
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = os.path.dirname(os.path.dirname(str(exc)))
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                    if retry < config.max_env_retry:
                        log.warn("failed to create env, retrying. exception was: %s", str(exc))
                        create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                   clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)
                    else:
                        log.error("Failed to create env, max retries exceeded.")
                        raise

    # We must not symlink conda across different platforms when cross-compiling.
    # On second thought, I think we must, because activating the host env does
    #    the symlink for us anyway, and when activate does it, we end up with
    #    conda symlinks in every package. =()
    # if os.path.basename(prefix) == '_build_env' or not is_cross:
    if utils.on_win:
        shell = "cmd.exe"
    else:
        shell = "bash"
    symlink_conda(prefix, sys.prefix, shell)
def get_install_actions(prefix, specs, env, retries=0, subdir=None,
                        verbose=True, debug=False, locking=True,
                        bldpkgs_dirs=None, timeout=900, disable_pip=False,
                        max_env_retry=3, output_folder=None, channel_urls=None):
    global cached_actions
    global last_index_ts
    actions = {}
    log = utils.get_logger(__name__)
    conda_log_level = logging.WARN
    specs = list(specs)
    if specs:
        specs.extend(create_default_packages)
    if verbose or debug:
        capture = contextlib.contextmanager(lambda: (yield))
        if debug:
            conda_log_level = logging.DEBUG
    else:
        capture = utils.capture
    for feature, value in feature_list:
        if value:
            specs.append('%s@' % feature)

    bldpkgs_dirs = ensure_list(bldpkgs_dirs)

    index, index_ts, _ = get_build_index(subdir, list(bldpkgs_dirs)[0],
                                         output_folder=output_folder,
                                         channel_urls=channel_urls,
                                         debug=debug, verbose=verbose,
                                         locking=locking, timeout=timeout)
    specs = tuple(utils.ensure_valid_spec(spec) for spec in specs
                  if not str(spec).endswith('@'))

    if ((specs, env, subdir, channel_urls, disable_pip) in cached_actions and
            last_index_ts >= index_ts):
        actions = cached_actions[(specs, env, subdir, channel_urls, disable_pip)].copy()
        if "PREFIX" in actions:
            actions['PREFIX'] = prefix
    elif specs:
        # this is hiding output like:
        #    Fetching package metadata ...........
        #    Solving package specifications: ..........
        with utils.LoggingContext(conda_log_level):
            with capture():
                try:
                    actions = install_actions(prefix, index, specs, force=True)
                except (NoPackagesFoundError, UnsatisfiableError) as exc:
                    raise DependencyNeedsBuildingError(exc, subdir=subdir)
                except (SystemExit, PaddingError, LinkError,
                        DependencyNeedsBuildingError, CondaError,
                        AssertionError, BuildLockError) as exc:
                    if 'lock' in str(exc):
                        log.warn("failed to get install actions, retrying. exception was: %s",
                                 str(exc))
                    elif ('requires a minimum conda version' in str(exc) or
                            'link a source that does not' in str(exc) or
                            isinstance(exc, AssertionError)):
                        locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)
                        with utils.try_acquire_locks(locks, timeout=timeout):
                            pkg_dir = str(exc)
                            folder = 0
                            while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                                pkg_dir = os.path.dirname(pkg_dir)
                                folder += 1
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                    if retries < max_env_retry:
                        log.warn("failed to get install actions, retrying. exception was: %s",
                                 str(exc))
                        actions = get_install_actions(prefix, tuple(specs), env,
                                                      retries=retries + 1,
                                                      subdir=subdir,
                                                      verbose=verbose,
                                                      debug=debug,
                                                      locking=locking,
                                                      bldpkgs_dirs=tuple(bldpkgs_dirs),
                                                      timeout=timeout,
                                                      disable_pip=disable_pip,
                                                      max_env_retry=max_env_retry,
                                                      output_folder=output_folder,
                                                      channel_urls=tuple(channel_urls))
                    else:
                        log.error("Failed to get install actions, max retries exceeded.")
                        raise
        if disable_pip:
            for pkg in ('pip', 'setuptools', 'wheel'):
                # specs are the raw specifications, not the conda-derived actual specs
                #   We're testing that pip etc. are manually specified
                if not any(re.match(r'^%s(?:$|[\s=].*)' % pkg, str(dep)) for dep in specs):
                    actions['LINK'] = [spec for spec in actions['LINK'] if spec.name != pkg]
        utils.trim_empty_keys(actions)
        cached_actions[(specs, env, subdir, channel_urls, disable_pip)] = actions.copy()
        last_index_ts = index_ts
    return actions
def create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True,
               retry=0, locks=None, is_cross=False, is_conda=False):
    '''
    Create a conda environment for the given prefix and specs.
    '''
    if config.debug:
        external_logger_context = utils.LoggingContext(logging.DEBUG)
    else:
        external_logger_context = utils.LoggingContext(logging.WARN)

    if os.path.exists(prefix):
        for entry in glob(os.path.join(prefix, "*")):
            utils.rm_rf(entry)

    with external_logger_context:
        log = utils.get_logger(__name__)

        # if os.path.isdir(prefix):
        #     utils.rm_rf(prefix)

        if specs_or_actions:  # Don't waste time if there is nothing to do
            log.debug("Creating environment in %s", prefix)
            log.debug(str(specs_or_actions))

            if not locks:
                locks = utils.get_conda_operation_locks(config)
            try:
                with utils.try_acquire_locks(locks, timeout=config.timeout):
                    # input is a list - it's specs in MatchSpec format
                    if not hasattr(specs_or_actions, 'keys'):
                        specs = list(set(specs_or_actions))
                        actions = get_install_actions(prefix, tuple(specs), env,
                                                      subdir=subdir,
                                                      verbose=config.verbose,
                                                      debug=config.debug,
                                                      locking=config.locking,
                                                      bldpkgs_dirs=tuple(config.bldpkgs_dirs),
                                                      timeout=config.timeout,
                                                      disable_pip=config.disable_pip,
                                                      max_env_retry=config.max_env_retry,
                                                      output_folder=config.output_folder,
                                                      channel_urls=tuple(config.channel_urls))
                    else:
                        actions = specs_or_actions
                    index, _, _ = get_build_index(subdir=subdir,
                                                  bldpkgs_dir=config.bldpkgs_dir,
                                                  output_folder=config.output_folder,
                                                  channel_urls=config.channel_urls,
                                                  debug=config.debug,
                                                  verbose=config.verbose,
                                                  locking=config.locking,
                                                  timeout=config.timeout)
                    utils.trim_empty_keys(actions)
                    display_actions(actions, index)
                    if utils.on_win:
                        for k, v in os.environ.items():
                            os.environ[k] = str(v)
                    with env_var('CONDA_QUIET', not config.verbose, reset_context):
                        with env_var('CONDA_JSON', not config.verbose, reset_context):
                            execute_actions(actions, index)
            except (SystemExit, PaddingError, LinkError,
                    DependencyNeedsBuildingError, CondaError,
                    BuildLockError) as exc:
                if (("too short in" in str(exc) or
                        re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl',
                                  str(exc)) or
                        isinstance(exc, PaddingError)) and
                        config.prefix_length > 80):
                    if config.prefix_length_fallback:
                        log.warn("Build prefix failed with prefix length %d",
                                 config.prefix_length)
                        log.warn("Error was: ")
                        log.warn(str(exc))
                        log.warn("One or more of your package dependencies needs to be rebuilt "
                                 "with a longer prefix length.")
                        log.warn("Falling back to legacy prefix length of 80 characters.")
                        log.warn("Your package will not install into prefixes > 80 characters.")
                        config.prefix_length = 80

                        host = '_h_env' in prefix
                        # Set this here and use to create environ
                        #   Setting this here is important because we use it below (symlink)
                        prefix = config.host_prefix if host else config.build_prefix
                        actions['PREFIX'] = prefix

                        create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                   clear_cache=clear_cache, is_cross=is_cross)
                    else:
                        raise
                elif 'lock' in str(exc):
                    if retry < config.max_env_retry:
                        log.warn("failed to create env, retrying. exception was: %s",
                                 str(exc))
                        create_env(prefix, specs_or_actions, config=config, subdir=subdir,
                                   env=env, clear_cache=clear_cache, retry=retry + 1,
                                   is_cross=is_cross)
                elif ('requires a minimum conda version' in str(exc) or
                        'link a source that does not' in str(exc)):
                    with utils.try_acquire_locks(locks, timeout=config.timeout):
                        pkg_dir = str(exc)
                        folder = 0
                        while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                            pkg_dir = os.path.dirname(pkg_dir)
                            folder += 1
                        log.warn("I think conda ended up with a partial extraction for %s. "
                                 "Removing the folder and retrying", pkg_dir)
                        if os.path.isdir(pkg_dir):
                            utils.rm_rf(pkg_dir)
                    if retry < config.max_env_retry:
                        log.warn("failed to create env, retrying. exception was: %s",
                                 str(exc))
                        create_env(prefix, specs_or_actions, config=config, subdir=subdir,
                                   env=env, clear_cache=clear_cache, retry=retry + 1,
                                   is_cross=is_cross)
                    else:
                        log.error("Failed to create env, max retries exceeded.")
                        raise
                else:
                    raise
            # HACK: some of the time, conda screws up somehow and incomplete packages result.
            #    Just retry.
            except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:
                if isinstance(exc, AssertionError):
                    with utils.try_acquire_locks(locks, timeout=config.timeout):
                        pkg_dir = os.path.dirname(os.path.dirname(str(exc)))
                        log.warn("I think conda ended up with a partial extraction for %s. "
                                 "Removing the folder and retrying", pkg_dir)
                        if os.path.isdir(pkg_dir):
                            utils.rm_rf(pkg_dir)
                if retry < config.max_env_retry:
                    log.warn("failed to create env, retrying. exception was: %s", str(exc))
                    create_env(prefix, specs_or_actions, config=config, subdir=subdir, env=env,
                               clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)
                else:
                    log.error("Failed to create env, max retries exceeded.")
                    raise
def create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True,
               retry=0, locks=None, is_cross=False, always_include_files=[]):
    '''
    Create a conda environment for the given prefix and specs.
    '''
    if config.debug:
        external_logger_context = utils.LoggingContext(logging.DEBUG)
    else:
        external_logger_context = utils.LoggingContext(logging.ERROR)

    with external_logger_context:
        log = utils.get_logger(__name__)

        # if os.path.isdir(prefix):
        #     utils.rm_rf(prefix)

        if specs_or_actions:  # Don't waste time if there is nothing to do
            log.debug("Creating environment in %s", prefix)
            log.debug(str(specs_or_actions))

            with utils.path_prepended(prefix):
                if not locks:
                    locks = utils.get_conda_operation_locks(config)
                try:
                    with utils.try_acquire_locks(locks, timeout=config.timeout):
                        # input is a list - it's specs in MatchSpec format
                        if not hasattr(specs_or_actions, 'keys'):
                            specs = list(set(specs_or_actions))
                            actions = get_install_actions(prefix, tuple(specs), env,
                                                          subdir=subdir,
                                                          verbose=config.verbose,
                                                          debug=config.debug,
                                                          locking=config.locking,
                                                          bldpkgs_dirs=tuple(config.bldpkgs_dirs),
                                                          timeout=config.timeout,
                                                          disable_pip=config.disable_pip,
                                                          max_env_retry=config.max_env_retry,
                                                          output_folder=config.output_folder,
                                                          channel_urls=tuple(config.channel_urls))
                        else:
                            actions = specs_or_actions
                        index, index_ts = get_build_index(subdir=subdir,
                                                          bldpkgs_dir=config.bldpkgs_dir,
                                                          output_folder=config.output_folder,
                                                          channel_urls=config.channel_urls,
                                                          debug=config.debug,
                                                          verbose=config.verbose,
                                                          locking=config.locking,
                                                          timeout=config.timeout)
                        utils.trim_empty_keys(actions)
                        display_actions(actions, index)
                        if utils.on_win:
                            for k, v in os.environ.items():
                                os.environ[k] = str(v)
                        execute_actions(actions, index, verbose=config.debug)
                except (SystemExit, PaddingError, LinkError,
                        DependencyNeedsBuildingError, CondaError) as exc:
                    if (("too short in" in str(exc) or
                            re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl',
                                      str(exc)) or
                            isinstance(exc, PaddingError)) and
                            config.prefix_length > 80):
                        if config.prefix_length_fallback:
                            log.warn("Build prefix failed with prefix length %d",
                                     config.prefix_length)
                            log.warn("Error was: ")
                            log.warn(str(exc))
                            log.warn("One or more of your package dependencies needs to be rebuilt "
                                     "with a longer prefix length.")
                            log.warn("Falling back to legacy prefix length of 80 characters.")
                            log.warn("Your package will not install into prefixes > 80 characters.")
                            config.prefix_length = 80

                            # Set this here and use to create environ
                            #   Setting this here is important because we use it below (symlink)
                            prefix = config.build_prefix
                            actions['PREFIX'] = prefix

                            create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                       clear_cache=clear_cache, is_cross=is_cross)
                        else:
                            raise
                    elif 'lock' in str(exc):
                        if retry < config.max_env_retry:
                            log.warn("failed to create env, retrying. exception was: %s",
                                     str(exc))
                            create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                       clear_cache=clear_cache, retry=retry + 1,
                                       is_cross=is_cross)
                    elif ('requires a minimum conda version' in str(exc) or
                            'link a source that does not' in str(exc)):
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = str(exc)
                            folder = 0
                            while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:
                                pkg_dir = os.path.dirname(pkg_dir)
                                folder += 1
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                        if retry < config.max_env_retry:
                            log.warn("failed to create env, retrying. exception was: %s",
                                     str(exc))
                            create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                       clear_cache=clear_cache, retry=retry + 1,
                                       is_cross=is_cross)
                        else:
                            log.error("Failed to create env, max retries exceeded.")
                            raise
                    else:
                        raise
                # HACK: some of the time, conda screws up somehow and incomplete packages result.
                #    Just retry.
                except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:
                    if isinstance(exc, AssertionError):
                        with utils.try_acquire_locks(locks, timeout=config.timeout):
                            pkg_dir = os.path.dirname(os.path.dirname(str(exc)))
                            log.warn("I think conda ended up with a partial extraction for %s. "
                                     "Removing the folder and retrying", pkg_dir)
                            if os.path.isdir(pkg_dir):
                                utils.rm_rf(pkg_dir)
                    if retry < config.max_env_retry:
                        log.warn("failed to create env, retrying. exception was: %s", str(exc))
                        create_env(prefix, actions, config=config, subdir=subdir, env=env,
                                   clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)
                    else:
                        log.error("Failed to create env, max retries exceeded.")
                        raise

    # We must not symlink conda across different platforms when cross-compiling.
    # On second thought, I think we must, because activating the host env does
    #    the symlink for us anyway, and when activate does it, we end up with
    #    conda symlinks in every package. =()
    # if os.path.basename(prefix) == '_build_env' or not is_cross:

    # Hack, do not SYMLINK_CONDA when we're building conda.
    if not any(include in ('bin/deactivate', 'Scripts/deactivate.bat')
               for include in always_include_files):
        if utils.on_win:
            shell = "cmd.exe"
        else:
            shell = "bash"
        symlink_conda(prefix, sys.prefix, shell)