Example #1
from fnmatch import filter as fnmatch_filter
from os import path, walk


def find_files(file_path, extension):
    """
    Recursively find files at path with extension; pulled from StackOverflow.
    """
    for root, dirs, files in walk(file_path):
        for file in fnmatch_filter(files, extension):
            yield path.join(root, file)
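A minimal usage sketch for the generator above; the directory and pattern are illustrative, not taken from the original source:

# Print every Python file under a hypothetical project directory.
for found in find_files("/tmp/project", "*.py"):
    print(found)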
Example #2
File: python.py  Project: makbigc/conda
def get_dist_file_from_egg_link(egg_link_file, prefix_path):
    """
    Return the egg info file path following an egg link.
    """
    egg_info_full_path = None

    egg_link_path = join(prefix_path, win_path_ok(egg_link_file))
    try:
        with open(egg_link_path) as fh:
            # See: https://setuptools.readthedocs.io/en/latest/formats.html#egg-links
            # "...Each egg-link file should contain a single file or directory name
            # with no newlines..."
            egg_link_contents = fh.readlines()[0].strip()
    except UnicodeDecodeError:
        from locale import getpreferredencoding
        with open(egg_link_path, encoding=getpreferredencoding()) as fh:
            egg_link_contents = fh.readlines()[0].strip()

    if lexists(egg_link_contents):
        egg_info_fnames = fnmatch_filter(listdir(egg_link_contents),
                                         '*.egg-info')
    else:
        egg_info_fnames = ()

    if egg_info_fnames:
        assert len(egg_info_fnames) == 1, (egg_link_file, egg_info_fnames)
        egg_info_full_path = join(egg_link_contents, egg_info_fnames[0])

        if isdir(egg_info_full_path):
            egg_info_full_path = join(egg_info_full_path, "PKG-INFO")

    if egg_info_full_path is None:
        raise EnvironmentError(ENOENT, strerror(ENOENT), egg_link_contents)

    return egg_info_full_path
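The core idiom here is resolving an egg-link to its .egg-info entry: read the single directory name stored in the .egg-link file, list that directory, and fnmatch-filter it for '*.egg-info'. Below is a stripped-down sketch of that idiom using only the standard library; the helper name is illustrative and this is not conda's API:

from fnmatch import filter as fnmatch_filter
from os import listdir
from os.path import exists, join

def egg_info_from_link(egg_link_path):  # illustrative helper, not part of conda
    # An egg-link file holds a single file or directory name with no newlines.
    with open(egg_link_path) as fh:
        target = fh.readline().strip()
    if not exists(target):
        return None
    matches = fnmatch_filter(listdir(target), "*.egg-info")
    return join(target, matches[0]) if matches else None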
Example #3
File: python.py  Project: conda/conda
def get_dist_file_from_egg_link(egg_link_file, prefix_path):
    """
    Return the egg info file path following an egg link.
    """
    egg_info_full_path = None

    egg_link_path = join(prefix_path, win_path_ok(egg_link_file))
    try:
        with open(egg_link_path) as fh:
            # See: https://setuptools.readthedocs.io/en/latest/formats.html#egg-links
            # "...Each egg-link file should contain a single file or directory name
            # with no newlines..."
            egg_link_contents = fh.readlines()[0].strip()
    except UnicodeDecodeError:
        from locale import getpreferredencoding
        with open(egg_link_path, encoding=getpreferredencoding()) as fh:
            egg_link_contents = fh.readlines()[0].strip()

    if lexists(egg_link_contents):
        egg_info_fnames = fnmatch_filter(listdir(egg_link_contents), '*.egg-info')
    else:
        egg_info_fnames = ()

    if egg_info_fnames:
        assert len(egg_info_fnames) == 1, (egg_link_file, egg_info_fnames)
        egg_info_full_path = join(egg_link_contents, egg_info_fnames[0])

        if isdir(egg_info_full_path):
            egg_info_full_path = join(egg_info_full_path, "PKG-INFO")

    if egg_info_full_path is None:
        raise EnvironmentError(ENOENT, strerror(ENOENT), egg_link_contents)

    return egg_info_full_path
Example #4
 def load(self):
     self.__prefix_records = {}
     _conda_meta_dir = join(self.prefix_path, 'conda-meta')
     if lexists(_conda_meta_dir):
         for meta_file in fnmatch_filter(listdir(_conda_meta_dir), '*.json'):
             self._load_single_record(join(_conda_meta_dir, meta_file))
     if self._pip_interop_enabled:
         self._load_site_packages()
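load() applies the same listdir-plus-fnmatch_filter idiom to pick up the *.json records in conda-meta. A minimal standalone sketch of that step, with an illustrative prefix path:

from fnmatch import filter as fnmatch_filter
from os import listdir
from os.path import join, lexists

conda_meta = join("/opt/conda", "conda-meta")  # illustrative prefix
records = fnmatch_filter(listdir(conda_meta), "*.json") if lexists(conda_meta) else []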
Example #5
 def create_source_grid(self, ncfile_in: str) -> "ESMF.Grid":
     # Create source grid from a SCRIP formatted file. As ESMF needs one
     # file rather than an MFDataset, give first file in directory.
     flist = np.sort(
         fnmatch_filter(listdir(self.input_dir), path.basename(ncfile_in)))
     ncsingle = path.join(self.input_dir, flist[0])
     sgrid = ESMF.Grid(filename=ncsingle, filetype=ESMF.FileFormat.GRIDSPEC)
     return sgrid
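Because fnmatch_filter preserves the order of listdir, which is arbitrary, the np.sort call is what makes "first file in directory" deterministic. A minimal sketch of that selection step; the directory and pattern are illustrative:

import numpy as np
from fnmatch import filter as fnmatch_filter
from os import listdir, path

input_dir = "/data/forcing"  # illustrative
flist = np.sort(fnmatch_filter(listdir(input_dir), "era5_*.nc"))
ncsingle = path.join(input_dir, flist[0]) if len(flist) else None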
Example #6
File: list.py  Project: lpirl/yana
    def find(self, args, queries, found_path_callback):
        """
        Returns all paths that were listed last time and that match
        patterns specified in ``queries``.
        """

        for query in queries:
            for matched_path in fnmatch_filter(self.last_listed_paths, query):
                logging.info("found by match in cache: %s", matched_path)
                found_path_callback(matched_path)
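A usage note: each query is an fnmatch pattern matched against the full cached path strings, so a '*' can also cross directory separators. An illustrative standalone run of the same matching step:

from fnmatch import filter as fnmatch_filter

cached_paths = ["/notes/todo.md", "/notes/work/plan.md", "/tmp/scratch.txt"]  # illustrative cache
for query in ["/notes/*.md"]:
    for matched in fnmatch_filter(cached_paths, query):
        print(matched)  # matches both .md paths, since '*' also crosses '/'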
Example #7
def get_dist_file_from_egg_link(egg_link_file, prefix_path):
    """
    Return the egg info file path following an egg link.
    """
    egg_info_full_path = None

    egg_link_path = join(prefix_path, win_path_ok(egg_link_file))
    try:
        with open(egg_link_path) as fh:
            # See: https://setuptools.readthedocs.io/en/latest/formats.html#egg-links
            # "...Each egg-link file should contain a single file or directory name
            # with no newlines..."
            egg_link_contents = fh.readlines()[0].strip()
    except UnicodeDecodeError:
        from locale import getpreferredencoding
        with open(egg_link_path, encoding=getpreferredencoding()) as fh:
            egg_link_contents = fh.readlines()[0].strip()

    if lexists(egg_link_contents):
        egg_info_fnames = fnmatch_filter(listdir(egg_link_contents),
                                         '*.egg-info')
    else:
        egg_info_fnames = ()

    if egg_info_fnames:
        if len(egg_info_fnames) != 1:
            raise CondaError(
                "Expected exactly one `egg-info` directory in '{}', via egg-link '{}'."
                " Instead found: {}.  These are often left over from "
                "legacy operations that did not clean up correctly.  Please "
                "remove all but one of these.".format(egg_link_contents,
                                                      egg_link_file,
                                                      egg_info_fnames))

        egg_info_full_path = join(egg_link_contents, egg_info_fnames[0])

        if isdir(egg_info_full_path):
            egg_info_full_path = join(egg_info_full_path, "PKG-INFO")

    if egg_info_full_path is None:
        raise EnvironmentError(ENOENT, strerror(ENOENT), egg_link_contents)

    return egg_info_full_path
Example #8
    def fill_menu(self, popup, url):
        """ Fill menu entries with folder contents. """
        sfa = self.sfa
        base_url = url
        if not base_url.endswith("/"):
            base_url += "/"
        names = self.sfa.getFolderContents(base_url, True)
        if not self.hidden:
            names = set([
                name for name in names
                if sfa.exists(name) and not sfa.isHidden(name)
            ])
        else:
            names = set(names)
        folders = set([name for name in names if sfa.isFolder(name)])
        files = names - folders
        if self.filter:
            _files = set()
            for _f in self.filter:
                _files.update(fnmatch_filter(files, _f))
            files = _files
        id = 1
        for name in folders:
            popup.insertItem(id, unquote(basename(name)), 0, id - 1)
            popup.setCommand(id, name)
            popup.setPopupMenu(id, self.create_sub_popup())
            id += 1
        pos = len(folders)
        _files = list(files)
        _files.sort()
        for name in _files:
            popup.insertItem(id, unquote(basename(name)), 0, id - 1)
            popup.setCommand(id, name)
            id += 1

        if len(_files):
            n = popup.getItemCount()
            popup.insertSeparator(n)
            popup.insertItem(self.OPEN_ALL_ID, self._label_open_all, 0, n + 1)
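When several filename patterns are active, this menu code unions the fnmatch_filter results per pattern. The same technique in isolation; the file names and patterns are illustrative:

from fnmatch import filter as fnmatch_filter

files = {"report.odt", "report.pdf", "notes.txt"}  # illustrative
patterns = ("*.odt", "*.pdf")
matched = set()
for pattern in patterns:
    matched.update(fnmatch_filter(files, pattern))
# matched == {"report.odt", "report.pdf"}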
Example #9
def _populate_imgs(img_directory, skip_save=True):
    global pickled_cache
    cache.update_locals(pickled_cache, locals())

    tbl = pickled_cache["tbl"]

    if "id2ideyefname" not in pickled_cache or not len(
            pickled_cache["id2ideyefname"]):
        if not path.isdir(img_directory) or not path.isdir(
                path.join(img_directory, "BMES1Images")):
            raise OSError(
                "{} must exist and contain the images".format(img_directory))

        pickled_cache["all_imgs"] = all_imgs = tuple(
            sorted(
                (IdEyeFname(*(lambda fname: (
                    fname[:-1],
                    fname[-1],
                    path.join(root, filename),
                ))(filename[filename.rfind("BMES") + len("BMES") +
                            1:filename.rfind("-")].partition("-")[0]))
                 for root, dirnames, filenames in walk(img_directory)
                 for filename in fnmatch_filter(filenames, "*.jpg")),
                key=itemgetter(0),
            ))  # type: [IdEyeFname]
        assert len(all_imgs) > 0

        pickled_cache["id2ideyefname"] = id2ideyefname = {
            key: d[key]
            for d in tuple(
                {_id: tuple(group)}
                for _id, group in groupby(all_imgs, key=itemgetter(0)))
            for key in d
        }  # type: {str: [IdEyeFname]}

        pickled_cache["total_imgs_assoc_to_id"] = total_imgs_assoc_to_id = sum(
            len(v) for k, v in iteritems(id2ideyefname))
        pickled_cache["tbl"] = tbl = {
            id_: RecImg(recimg.rec, id2ideyefname[id_])
            for id_, recimg in iteritems(tbl) if id_ in id2ideyefname
        }
        assert len(tbl.keys()) > 0

    else:
        id2ideyefname = pickled_cache["id2ideyefname"]
        all_imgs = pickled_cache["all_imgs"]
        total_imgs_assoc_to_id = pickled_cache["total_imgs_assoc_to_id"]

    if "imgs_to_id" not in pickled_cache or not len(
            pickled_cache["imgs_to_id"]):
        pickled_cache["imgs_to_id"] = imgs_to_id = {
            ideyefname.fname: id_
            for id_, ideyefnames in iteritems(id2ideyefname)
            for ideyefname in ideyefnames
        }
        assert len(imgs_to_id) > 0
    else:
        imgs_to_id = pickled_cache["imgs_to_id"]

    if "total_imgs" not in pickled_cache or not len(
            pickled_cache["total_imgs"]):
        pickled_cache["total_imgs"] = total_imgs = sum(
            sum(1 for fname in filenames if fname.endswith(".jpg"))
            for root, dirnames, filenames in walk(img_directory))
        assert total_imgs > 0
    else:
        total_imgs = pickled_cache["total_imgs"]  # type: int

    # Arghh, where are my views/slices?
    if not skip_save:
        logger.debug("saving in _populate_imgs")
        cache.save(pickled_cache)

    logger.debug(
        "total_imgs == len(all_imgs) == len(imgs_to_id):".ljust(just) +
        "{}".format(total_imgs == len(all_imgs) == len(imgs_to_id)))
    logger.debug("# not allocated in tbl:".ljust(just) +
                 "{}".format(len(id2ideyefname) - len(tbl)))

    return pickled_cache
Example #10
    def _load_site_packages(self):
        python_record = next(
            (prefix_rec for prefix_rec in itervalues(self.__prefix_records)
             if prefix_rec.name == 'python'), None)
        if not python_record:
            return
        prefix_graph = PrefixGraph(self.iter_records())
        known_python_records = prefix_graph.all_descendants(python_record)

        def norm_package_name(name):
            return name.replace('.', '-').replace('_', '-').lower()

        anchor_file_endings = ('.egg-info/PKG-INFO', '.dist-info/RECORD',
                               '.egg-info')
        conda_python_packages = dict(
            ((af, prefix_rec) for prefix_rec in known_python_records
             for af in prefix_rec.files
             if af.endswith(anchor_file_endings) and 'site-packages' in af))

        all_sp_anchor_files = set()
        site_packages_dir = get_python_site_packages_short_path(
            python_record.version)
        sp_dir_full_path = join(self.prefix_path,
                                win_path_ok(site_packages_dir))
        sp_anchor_endings = ('.dist-info', '.egg-info', '.egg-link')
        if not isdir(sp_dir_full_path):
            return
        for fn in listdir(sp_dir_full_path):
            if fn.endswith(sp_anchor_endings):
                if fn.endswith('.dist-info'):
                    anchor_file = "%s/%s/%s" % (site_packages_dir, fn,
                                                'RECORD')
                elif fn.endswith(".egg-info"):
                    if isfile(join(sp_dir_full_path, fn)):
                        anchor_file = "%s/%s" % (site_packages_dir, fn)
                    else:
                        anchor_file = "%s/%s/%s" % (site_packages_dir, fn,
                                                    "PKG-INFO")
                elif fn.endswith('.egg-link'):
                    anchor_file = "%s/%s" % (site_packages_dir, fn)
                elif fn.endswith('.pth'):
                    continue
                else:
                    continue
                all_sp_anchor_files.add(anchor_file)

        _conda_anchor_files = set(conda_python_packages)
        clobbered_conda_anchor_files = _conda_anchor_files - all_sp_anchor_files
        non_conda_anchor_files = all_sp_anchor_files - _conda_anchor_files

        # If there's a mismatch for anchor files between what conda expects for a package
        # based on conda-meta, and for what is actually in site-packages, then we'll delete
        # the in-memory record for the conda package.  In the future, we should consider
        # also deleting the record on disk in the conda-meta/ directory.
        for conda_anchor_file in clobbered_conda_anchor_files:
            del self._prefix_records[
                conda_python_packages[conda_anchor_file].name]

        # TODO: only compatible with pip 9.0; consider writing this by hand
        from pip._vendor.distlib.database import EggInfoDistribution, InstalledDistribution
        from pip._vendor.distlib.metadata import MetadataConflictError
        from pip._vendor.distlib.util import parse_requirement

        def get_pydist(anchor_file):
            if ".dist-info" in anchor_file:
                sp_reference = basename(dirname(anchor_file))
                dist_file = join(self.prefix_path,
                                 win_path_ok(dirname(anchor_file)))
                dist_cls = InstalledDistribution
                package_type = PackageType.SHADOW_PYTHON_DIST_INFO
            elif anchor_file.endswith(".egg-info"):
                sp_reference = basename(anchor_file)
                dist_file = join(self.prefix_path, win_path_ok(anchor_file))
                dist_cls = EggInfoDistribution
                package_type = PackageType.SHADOW_PYTHON_EGG_INFO_FILE
            elif ".egg-info" in anchor_file:
                sp_reference = basename(dirname(anchor_file))
                dist_file = join(self.prefix_path,
                                 win_path_ok(dirname(anchor_file)))
                dist_cls = EggInfoDistribution
                package_type = PackageType.SHADOW_PYTHON_EGG_INFO_DIR
            elif anchor_file.endswith(".egg-link"):
                raise NotImplementedError()
            else:
                raise NotImplementedError()
            try:
                pydist = dist_cls(dist_file)
            except MetadataConflictError:
                print("MetadataConflictError:", anchor_file)
                pydist = None
            return package_type, sp_reference, pydist

        def get_python_rec(anchor_file):
            package_type, sp_reference, pydist = get_pydist(anchor_file)
            if pydist is None:
                return None
            # x.provides  =>  [u'skdata (0.0.4)']
            # x.run_requires  =>  set([u'joblib', u'scikit-learn', u'lockfile', u'numpy', u'nose (>=1.0)'])  # NOQA
            # >>> list(x.list_installed_files())  =>  [(u'skdata/__init__.py', u'sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU', u'0'), (u'skdata/base.py', u'sha256=04MW02dky5T4nZb6Q0M351aRbAwLxd8voCK3nrAU-g0', u'5019'), (u'skdata/brodatz.py', u'sha256=NIPWLawJ59Fr037r0oT_gHe46WCo3UivuQ-cwxRU3ow', u'8492'), (u'skdata/caltech.py', u'sha256=cIfyMMRYggZ3Jkgc15tsYi_ZsZ7NpRqWh7mZ8bl6Fo0', u'8047'), (u'skdata/data_home.py', u'sha256=o5ChOI4v3Jd16JM3qWZlhrs5q-g_0yKa5-Oq44HC_K4', u'1297'), (u'skdata/diabetes.py', u'sha256=ny5Ihpc_eiIRYgzFn3Lm81fV0SZ1nyZQnqEmwb2PrS0', u'995'), (u'skdata/digits.py', u'sha256=DipeWAb3APpjXfmKmSumkfEFzuBW8XJ0  # NOQA

            # TODO: normalize names against '.', '-', '_'
            # TODO: ensure that this dist is *actually* the dist that matches conda-meta

            if package_type == PackageType.SHADOW_PYTHON_EGG_INFO_FILE:
                paths_data = None
            elif package_type == PackageType.SHADOW_PYTHON_DIST_INFO:
                _paths_data = []
                for _path, _hash, _size in pydist.list_installed_files():
                    if _hash:
                        assert _hash.startswith('sha256='), (anchor_file,
                                                             _hash)
                        sha256 = _hash[7:]
                    else:
                        sha256 = None
                    _size = int(_size) if _size else None
                    _paths_data.append(
                        PathDataV1(_path=_path,
                                   path_type=PathType.hardlink,
                                   sha256=sha256,
                                   size_in_bytes=_size))
                paths_data = PathsData(paths_version=1, paths=_paths_data)
            elif package_type == PackageType.SHADOW_PYTHON_EGG_INFO_DIR:
                _paths_data = []
                # TODO: Don't use list_installed_files() here. Read SOURCES.txt directly.
                for _path, _, _ in pydist.list_installed_files():
                    _paths_data.append(
                        PathData(
                            _path=_path,
                            path_type=PathType.hardlink,
                        ))
                paths_data = PathsData(paths_version=1, paths=_paths_data)
            else:
                raise NotImplementedError()

            # TODO: need to add entry points, "exports," and other files that might not be in RECORD  # NOQA

            depends = tuple(
                req.name for req in
                # vars(req) => {'source': u'nose (>=1.0)', 'requirement': u'nose (>= 1.0)', 'extras': None, 'name': u'nose', 'url': None, 'constraints': [(u'>=', u'1.0')]}  # NOQA
                (parse_requirement(r) for r in pydist.run_requires))
            # TODO: need to add python (with version?) to deps

            python_rec = PrefixRecord(
                package_type=package_type,
                namespace='python',
                name=pydist.name.lower(),
                version=pydist.version,
                channel=Channel('pypi'),
                subdir='pypi',
                fn=sp_reference,
                build='pypi_0',
                build_number=0,
                paths_data=paths_data,
                depends=depends,
            )
            return python_rec

        egg_link_files = []
        for anchor_file in non_conda_anchor_files:
            if anchor_file.endswith('.egg-link'):
                egg_link_files.append(anchor_file)
                continue
            python_rec = get_python_rec(anchor_file)
            if python_rec is None:
                # get_pydist returns None on a MetadataConflictError; skip such anchors.
                continue
            self.__prefix_records[python_rec.name] = python_rec

        for egg_link_file in egg_link_files:
            with open(join(self.prefix_path,
                           win_path_ok(egg_link_file))) as fh:
                egg_link_contents = fh.readlines()[0].strip()
            if lexists(egg_link_contents):
                egg_info_fns = fnmatch_filter(listdir(egg_link_contents),
                                              '*.egg-info')
            else:
                egg_info_fns = ()
            if not egg_info_fns:
                continue
            assert len(egg_info_fns) == 1, (egg_link_file, egg_info_fns)
            egg_info_full_path = join(egg_link_contents, egg_info_fns[0])
            if isdir(egg_info_full_path):
                egg_info_full_path = join(egg_info_full_path, "PKG-INFO")
            python_rec = get_python_rec(egg_info_full_path)
            if python_rec is None:
                # Same MetadataConflictError escape hatch as above.
                continue
            python_rec.package_type = PackageType.SHADOW_PYTHON_EGG_LINK
            self.__prefix_records[python_rec.name] = python_rec
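The anchor-file scan at the heart of this method boils down to mapping each site-packages metadata entry to the file conda will read: RECORD for a .dist-info directory, PKG-INFO for an .egg-info directory, and the file itself for .egg-info files and .egg-link files. A standalone sketch of just that mapping; the helper name is illustrative and not conda's API:

from os import listdir
from os.path import isfile, join

def iter_anchor_files(sp_dir):  # illustrative helper, not conda's API
    """Yield the metadata anchor for each .dist-info/.egg-info/.egg-link entry in sp_dir."""
    for fn in listdir(sp_dir):
        if fn.endswith(".dist-info"):
            yield join(fn, "RECORD")
        elif fn.endswith(".egg-info"):
            # A bare .egg-info file is its own anchor; a directory anchors at PKG-INFO.
            yield fn if isfile(join(sp_dir, fn)) else join(fn, "PKG-INFO")
        elif fn.endswith(".egg-link"):
            yield fn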