Example #1
def test_get_lock(testing_workdir):
    lock1 = utils.get_lock(os.path.join(testing_workdir, 'lock1'))
    lock2 = utils.get_lock(os.path.join(testing_workdir, 'lock2'))

    # Different folders should get different lock files.
    assert lock1.lock_file != lock2.lock_file

    # Same folder should get the same lock file.
    lock1_copy = utils.get_lock(os.path.join(testing_workdir, 'lock1'))
    assert lock1.lock_file == lock1_copy.lock_file

    # ...even when not normalized
    lock1_unnormalized = utils.get_lock(os.path.join(testing_workdir, 'foo', '..', 'lock1'))
    assert lock1.lock_file == lock1_unnormalized.lock_file
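The assertions above hold only if get_lock normalizes the folder path before deriving the lock-file name. A minimal sketch of that idea (hypothetical helper and _FileLock stand-in, not conda-build's actual implementation):

import hashlib
import os


class _FileLock(object):
    # Stand-in for the object returned by utils.get_lock; only the
    # lock_file attribute checked by the test is modeled here.
    def __init__(self, lock_file):
        self.lock_file = lock_file


def get_lock_sketch(folder):
    # Normalize first so 'foo/../lock1' and 'lock1' resolve to the same
    # directory, and therefore to the same lock file.
    normalized = os.path.realpath(os.path.normpath(folder))
    digest = hashlib.sha1(normalized.encode('utf-8')).hexdigest()
    return _FileLock(os.path.join(normalized, '.lock_' + digest))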
Example #2
def clean_pkg_cache(dist, timeout):
    cc.pkgs_dirs = cc.pkgs_dirs[:1]
    locks = [get_lock(folder, timeout=timeout) for folder in cc.pkgs_dirs]
    with ExitStack() as stack:
        for lock in locks:
            stack.enter_context(lock)
        rmplan = [
            'RM_EXTRACTED {0} local::{0}'.format(dist),
            'RM_FETCHED {0} local::{0}'.format(dist),
        ]
        plan.execute_plan(rmplan)

        # Conda does not seem to do a complete cleanup sometimes.  This is supplemental.
        #   Conda's cleanup is still necessary - it keeps track of its own in-memory
        #   list of downloaded things.
        for folder in cc.pkgs_dirs:
            try:
                assert not os.path.exists(os.path.join(folder, dist))
                assert not os.path.exists(
                    os.path.join(folder, dist + '.tar.bz2'))
                for pkg_id in [dist, 'local::' + dist]:
                    assert pkg_id not in package_cache()
            except AssertionError:
                log.debug(
                    "Conda caching error: %s package remains in cache after removal",
                    dist)
                log.debug("Clearing package cache to compensate")
                cache = package_cache()
                keys = [key for key in cache.keys() if dist in key]
                for pkg_id in keys:
                    if pkg_id in cache:
                        del cache[pkg_id]
                for entry in glob(os.path.join(folder, dist + '*')):
                    rm_rf(entry)
Example #3
    def write_to_channel(self, dest):
        # Write the index to a channel. Useful to get conda to read it back in again
        # using conda.api.get_index().
        channel_subdir = os.path.join(dest, subdir)
        if not os.path.exists(channel_subdir):
            os.mkdir(channel_subdir)
        if hasattr(conda_build, 'api'):
            lock = get_lock(channel_subdir)
            write_repodata({'packages': self, 'info': {}}, channel_subdir, lock, config=conda_build.api.Config())
        else:
            write_repodata({'packages': self, 'info': {}}, channel_subdir)

        return channel_subdir
Example #4
def clean_pkg_cache(dist, config):
    locks = []

    conda_log_level = logging.WARN
    if config.debug:
        conda_log_level = logging.DEBUG

    _pkgs_dirs = pkgs_dirs[:1]
    if config.locking:
        locks = [
            utils.get_lock(folder, timeout=config.timeout)
            for folder in _pkgs_dirs
        ]
    with utils.LoggingContext(conda_log_level):
        with utils.try_acquire_locks(locks, timeout=config.timeout):
            rmplan = [
                'RM_EXTRACTED {0} local::{0}'.format(dist),
                'RM_FETCHED {0} local::{0}'.format(dist),
            ]
            execute_plan(rmplan)

            # Conda does not seem to do a complete cleanup sometimes.  This is supplemental.
            #   Conda's cleanup is still necessary - it keeps track of its own in-memory
            #   list of downloaded things.
            for folder in pkgs_dirs:
                try:
                    assert not os.path.exists(os.path.join(folder, dist))
                    assert not os.path.exists(
                        os.path.join(folder, dist + '.tar.bz2'))
                    for pkg_id in [dist, 'local::' + dist]:
                        assert pkg_id not in package_cache()
                except AssertionError:
                    log = utils.get_logger(__name__)
                    log.debug(
                        "Conda caching error: %s package remains in cache after removal",
                        dist)
                    log.debug("manually removing to compensate")
                    cache = package_cache()
                    keys = [key for key in cache.keys() if dist in key]
                    for pkg_id in keys:
                        if pkg_id in cache:
                            del cache[pkg_id]
                    for entry in glob(os.path.join(folder, dist + '*')):
                        utils.rm_rf(entry)
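Both cache-cleaning variants above hold every package-cache lock for the whole removal: the earlier one enters the locks one by one through ExitStack, the one just above delegates to utils.try_acquire_locks. A rough sketch of how such a helper can be built on ExitStack (hypothetical; a real version would also apply the timeout when acquiring each lock):

from contextlib import ExitStack, contextmanager


@contextmanager
def try_acquire_locks_sketch(locks, timeout):
    # Enter every lock before yielding; ExitStack releases them in reverse
    # order when the with-block exits, even on error. The timeout argument
    # is accepted here only to mirror the call sites above.
    with ExitStack() as stack:
        for lock in locks:
            stack.enter_context(lock)
        yield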
Example #5
def read_index_tar(tar_path, config, lock=None):
    """ Returns the index.json dict inside the given package tarball. """

    if not lock:
        lock = get_lock(os.path.dirname(tar_path), timeout=config.timeout)
    with ExitStack() as stack:
        stack.enter_context(lock)
        t = tarfile.open(tar_path)
        stack.enter_context(t)
        try:
            return json.loads(t.extractfile('info/index.json').read().decode('utf-8'))
        except EOFError:
            raise RuntimeError("Could not extract %s. File probably corrupt."
                % tar_path)
        except OSError as e:
            raise RuntimeError("Could not extract %s (%s)" % (tar_path, e))
        except tarfile.ReadError:
            raise RuntimeError("Could not extract metadata from %s. "
                            "File probably corrupt." % tar_path)
Example #6
    def write_to_channel(self, dest):
        # Write the index to a channel. Useful to get conda to read it back in again
        # using conda.api.get_index().
        channel_subdir = os.path.join(dest, conda.config.subdir)
        if not os.path.exists(channel_subdir):
            os.mkdir(channel_subdir)
        if hasattr(conda_build, 'api'):
            lock = get_lock(channel_subdir)
            write_repodata({
                'packages': self,
                'info': {}
            },
                           channel_subdir,
                           lock,
                           config=conda_build.api.Config())
        else:
            write_repodata({'packages': self, 'info': {}}, channel_subdir)

        return channel_subdir
Example #7
def read_index_tar(tar_path, config, lock=None):
    """ Returns the index.json dict inside the given package tarball. """

    if not lock:
        lock = get_lock(os.path.dirname(tar_path), timeout=config.timeout)
    with ExitStack() as stack:
        stack.enter_context(lock)
        t = tarfile.open(tar_path)
        stack.enter_context(t)
        try:
            return json.loads(
                t.extractfile('info/index.json').read().decode('utf-8'))
        except EOFError:
            raise RuntimeError("Could not extract %s. File probably corrupt." %
                               tar_path)
        except OSError as e:
            raise RuntimeError("Could not extract %s (%s)" % (tar_path, e))
        except tarfile.ReadError:
            raise RuntimeError("Could not extract metadata from %s. "
                               "File probably corrupt." % tar_path)
Example #8
def clean_pkg_cache(dist, config):
    locks = []

    conda_log_level = logging.WARN
    if config.debug:
        conda_log_level = logging.DEBUG

    _pkgs_dirs = pkgs_dirs[:1]
    if config.locking:
        locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]
    with utils.LoggingContext(conda_log_level):
        with utils.try_acquire_locks(locks, timeout=config.timeout):
            rmplan = [
                'RM_EXTRACTED {0} local::{0}'.format(dist),
                'RM_FETCHED {0} local::{0}'.format(dist),
            ]
            execute_plan(rmplan)

            # Conda does not seem to do a complete cleanup sometimes.  This is supplemental.
            #   Conda's cleanup is still necessary - it keeps track of its own in-memory
            #   list of downloaded things.
            for folder in pkgs_dirs:
                try:
                    assert not os.path.exists(os.path.join(folder, dist))
                    assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))
                    for pkg_id in [dist, 'local::' + dist]:
                        assert pkg_id not in package_cache()
                except AssertionError:
                    log = utils.get_logger(__name__)
                    log.debug("Conda caching error: %s package remains in cache after removal",
                              dist)
                    log.debug("manually removing to compensate")
                    cache = package_cache()
                    keys = [key for key in cache.keys() if dist in key]
                    for pkg_id in keys:
                        if pkg_id in cache:
                            del cache[pkg_id]
                    for entry in glob(os.path.join(folder, dist + '*')):
                        utils.rm_rf(entry)
Example #9
def get_pkg_dirs_locks(dirs, config):
    return [utils.get_lock(folder, timeout=config.timeout) for folder in dirs]
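A hedged usage sketch of the helper above, following the pattern from the clean_pkg_cache examples: build the lock list only when locking is enabled, then hold every lock while the cache is touched. Here `utils`, `config`, and `pkgs_dirs` are assumed to be the same objects used throughout this section.

locks = get_pkg_dirs_locks(pkgs_dirs[:1], config) if config.locking else []
with utils.try_acquire_locks(locks, timeout=config.timeout):
    pass  # work on the package cache while all locks are held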
Example #10
def update_index(dir_path,
                 config,
                 force=False,
                 check_md5=False,
                 remove=True,
                 lock=None,
                 could_be_mirror=True):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """

    log = utils.get_logger(__name__)

    log.debug("updating index in: %s", dir_path)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    index_path = join(dir_path, '.index.json')

    if not lock:
        lock = get_lock(dir_path)

    locks = []
    if config.locking:
        locks.append(lock)

    index = {}

    with try_acquire_locks(locks, config.timeout):
        if not force:
            try:
                mode_dict = {
                    'mode': 'r',
                    'encoding': 'utf-8'
                } if PY3 else {
                    'mode': 'rb'
                }
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        subdir = None

        files = set(fn for fn in os.listdir(dir_path)
                    if fn.endswith('.tar.bz2'))
        if could_be_mirror and any(fn.startswith('_license-') for fn in files):
            sys.exit("""\
    Error:
        Indexing a copy of the Anaconda conda package channel is neither
        necessary nor supported.  If you wish to add your own packages,
        you can do so by adding them to a separate channel.
    """)
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if config.verbose:
                print('updating:', fn)
            d = read_index_tar(path, config, lock=lock)
            d.update(file_info(path))
            index[fn] = d
            # there's only one subdir for a given folder, so only read these contents once
            if not subdir:
                subdir = d['subdir']

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn +
                                                  '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if config.verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {
            'mode': 'w',
            'encoding': 'utf-8'
        } if PY3 else {
            'mode': 'wb'
        }
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata, dir_path, lock=lock, config=config)
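The repodata pass at the end of update_index rewrites the in-memory index before handing it to write_repodata: it strips the 'arch', 'platform', 'mtime', and 'ucs' keys from each entry and copies a legacy 'requires' list to 'depends'. A self-contained illustration on a toy entry (all values made up):

toy_index = {
    'foo-1.0-py36_0.tar.bz2': {
        'name': 'foo', 'version': '1.0',
        'arch': 'x86_64', 'platform': 'linux', 'mtime': 1500000000.0,
        'requires': ['python 3.6*'],
    },
}

for fn in toy_index:
    info = toy_index[fn]
    for varname in 'arch', 'platform', 'mtime', 'ucs':
        info.pop(varname, None)  # same effect as the try/except KeyError above
    if 'requires' in info and 'depends' not in info:
        info['depends'] = info['requires']

repodata = {'packages': toy_index, 'info': {}}
# The entry now carries 'depends' and no 'arch'/'platform'/'mtime' keys,
# the shape of the dict passed to write_repodata above.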
Example #11
def update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,
                 could_be_mirror=True):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """

    if config.verbose:
        print("updating index in:", dir_path)
    index_path = join(dir_path, '.index.json')
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    if not lock:
        lock = get_lock(dir_path)

    with lock:
        if force:
            index = {}
        else:
            try:
                mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
        if could_be_mirror and any(fn.startswith('_license-') for fn in files):
            sys.exit("""\
    Error:
        Indexing a copy of the Anaconda conda package channel is neither
        necessary nor supported.  If you wish to add your own packages,
        you can do so by adding them to a separate channel.
    """)
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if config.verbose:
                print('updating:', fn)
            d = read_index_tar(path, config, lock=lock)
            d.update(file_info(path))
            index[fn] = d

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if config.verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata, dir_path, lock=lock, config=config)
Example #12
def update_index(dir_path, force=False, check_md5=False, remove=True, lock=None,
                 could_be_mirror=True, verbose=True, locking=True, timeout=90):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """

    log = utils.get_logger(__name__)

    log.debug("updating index in: %s", dir_path)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    index_path = join(dir_path, '.index.json')

    if not lock:
        lock = get_lock(dir_path)

    locks = []
    if locking:
        locks.append(lock)

    index = {}

    with try_acquire_locks(locks, timeout):
        if not force:
            try:
                mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if verbose:
                print('updating:', fn)
            d = read_index_tar(path, lock=lock, locking=locking, timeout=timeout)
            d.update(file_info(path))
            index[fn] = d

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata, dir_path, lock=lock, locking=locking, timeout=timeout)
Example #13
def create_env(prefix, specs, config, clear_cache=True):
    '''
    Create a conda environment for the given prefix and specs.
    '''
    if config.debug:
        logging.getLogger("conda").setLevel(logging.DEBUG)
        logging.getLogger("binstar").setLevel(logging.DEBUG)
        logging.getLogger("install").setLevel(logging.DEBUG)
        logging.getLogger("conda.install").setLevel(logging.DEBUG)
        logging.getLogger("fetch").setLevel(logging.DEBUG)
        logging.getLogger("print").setLevel(logging.DEBUG)
        logging.getLogger("progress").setLevel(logging.DEBUG)
        logging.getLogger("dotupdate").setLevel(logging.DEBUG)
        logging.getLogger("stdoutlog").setLevel(logging.DEBUG)
        logging.getLogger("requests").setLevel(logging.DEBUG)
    else:
        silence_loggers(show_warnings_and_errors=True)

    if os.path.isdir(prefix):
        rm_rf(prefix)

    specs = list(specs)
    for feature, value in feature_list:
        if value:
            specs.append('%s@' % feature)

    if specs:  # Don't waste time if there is nothing to do
        with path_prepended(prefix):
            locks = []
            try:
                cc.pkgs_dirs = cc.pkgs_dirs[:1]
                locked_folders = cc.pkgs_dirs + list(config.bldpkgs_dirs)
                for folder in locked_folders:
                    if not os.path.isdir(folder):
                        os.makedirs(folder)
                    lock = get_lock(folder, timeout=config.timeout)
                    if not folder.endswith('pkgs'):
                        update_index(folder,
                                     config=config,
                                     lock=lock,
                                     could_be_mirror=False)
                    locks.append(lock)

                with ExitStack() as stack:
                    for lock in locks:
                        stack.enter_context(lock)
                    index = get_build_index(config=config, clear_cache=True)

                    actions = plan.install_actions(prefix, index, specs)
                    if config.disable_pip:
                        actions['LINK'] = [
                            spec for spec in actions['LINK']
                            if not spec.startswith('pip-')
                        ]  # noqa
                        actions['LINK'] = [
                            spec for spec in actions['LINK']
                            if not spec.startswith('setuptools-')
                        ]  # noqa
                    plan.display_actions(actions, index)
                    if on_win:
                        for k, v in os.environ.items():
                            os.environ[k] = str(v)
                    plan.execute_actions(actions, index, verbose=config.debug)
            except (SystemExit, PaddingError, LinkError) as exc:
                if (("too short in" in str(exc)
                     or 'post-link failed for: openssl' in str(exc)
                     or isinstance(exc, PaddingError))
                        and config.prefix_length > 80):
                    log.warn("Build prefix failed with prefix length %d",
                             config.prefix_length)
                    log.warn("Error was: ")
                    log.warn(str(exc))
                    log.warn(
                        "One or more of your package dependencies needs to be rebuilt "
                        "with a longer prefix length.")
                    log.warn(
                        "Falling back to legacy prefix length of 80 characters."
                    )
                    log.warn(
                        "Your package will not install into prefixes > 80 characters."
                    )
                    config.prefix_length = 80

                    # Set this here and use to create environ
                    #   Setting this here is important because we use it below (symlink)
                    prefix = config.build_prefix

                    create_env(prefix,
                               specs,
                               config=config,
                               clear_cache=clear_cache)
        warn_on_old_conda_build(index=index)

    # ensure prefix exists, even if empty, i.e. when specs are empty
    if not isdir(prefix):
        os.makedirs(prefix)
    if on_win:
        shell = "cmd.exe"
    else:
        shell = "bash"
    symlink_conda(prefix, sys.prefix, shell)
Example #14
def update_index(dir_path,
                 force=False,
                 check_md5=False,
                 remove=True,
                 lock=None,
                 could_be_mirror=True,
                 verbose=True,
                 locking=True,
                 timeout=90):
    """
    Update all index files in dir_path with changed packages.

    :param verbose: Should detailed status messages be output?
    :type verbose: bool
    :param force: Whether to re-index all packages (including those that
                  haven't changed) or not.
    :type force: bool
    :param check_md5: Whether to check MD5s instead of mtimes for determining
                      if a package changed.
    :type check_md5: bool
    """

    log = utils.get_logger(__name__)

    log.debug("updating index in: %s", dir_path)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    index_path = join(dir_path, '.index.json')

    if not lock:
        lock = get_lock(dir_path)

    locks = []
    if locking:
        locks.append(lock)

    index = {}

    with try_acquire_locks(locks, timeout):
        if not force:
            try:
                mode_dict = {
                    'mode': 'r',
                    'encoding': 'utf-8'
                } if PY3 else {
                    'mode': 'rb'
                }
                with open(index_path, **mode_dict) as fi:
                    index = json.load(fi)
            except (IOError, ValueError):
                index = {}

        files = set(fn for fn in os.listdir(dir_path)
                    if fn.endswith('.tar.bz2'))
        for fn in files:
            path = join(dir_path, fn)
            if fn in index:
                if check_md5:
                    if index[fn]['md5'] == md5_file(path):
                        continue
                elif index[fn]['mtime'] == getmtime(path):
                    continue
            if verbose:
                print('updating:', fn)
            d = read_index_tar(path,
                               lock=lock,
                               locking=locking,
                               timeout=timeout)
            d.update(file_info(path))
            index[fn] = d

        for fn in files:
            index[fn]['sig'] = '.' if isfile(join(dir_path, fn +
                                                  '.sig')) else None

        if remove:
            # remove files from the index which are not on disk
            for fn in set(index) - files:
                if verbose:
                    print("removing:", fn)
                del index[fn]

        # Deal with Python 2 and 3's different json module type reqs
        mode_dict = {
            'mode': 'w',
            'encoding': 'utf-8'
        } if PY3 else {
            'mode': 'wb'
        }
        with open(index_path, **mode_dict) as fo:
            json.dump(index, fo, indent=2, sort_keys=True, default=str)

        # --- new repodata
        for fn in index:
            info = index[fn]
            for varname in 'arch', 'platform', 'mtime', 'ucs':
                try:
                    del info[varname]
                except KeyError:
                    pass

            if 'requires' in info and 'depends' not in info:
                info['depends'] = info['requires']

        repodata = {'packages': index, 'info': {}}
        write_repodata(repodata,
                       dir_path,
                       lock=lock,
                       locking=locking,
                       timeout=timeout)