Example #1
def main():
    chapter_files, other_files = get_filenames()

    # Make the "previous" link of the first file and the "next" link of the
    # last file point back to README.
    prevs = ['README.md'] + chapter_files[:-1]
    nexts = chapter_files[1:] + ['README.md']

    print("Chapter files:")
    for prevpath, thispath, nextpath in zip(prevs, chapter_files, nexts):
        # all paths should be like 'section/file.md'
        where = posixpath.dirname(thispath)
        prev = posixpath.relpath(prevpath, where)
        next_ = posixpath.relpath(nextpath, where)
        extralinks = "[Previous](%s) | [Next](%s) |\n" % (prev, next_)
        end = END_TEMPLATE.format(
            toplevel='..', extralinks=extralinks, readmeheader=where)
        update_end(thispath, end)

    print()

    print("Other files:")
    for filename in other_files:
        where = posixpath.dirname(filename)
        end = END_TEMPLATE.format(
            toplevel=posixpath.relpath('.', where),
            extralinks="", readmeheader='list-of-contents')
        update_end(filename, end)
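A minimal standalone sketch of the relpath calls the loop above makes; the chapter file names here are made up for illustration:

import posixpath

where = posixpath.dirname('chapters/02-lists.md')
assert where == 'chapters'
assert posixpath.relpath('chapters/01-intro.md', where) == '01-intro.md'
assert posixpath.relpath('README.md', where) == '../README.md'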
Example #2
    def __iter__(self):
        for item in self.previous:
            yield item

        cwd = os.getcwd()
        yield {self.pathkey: posixpath.sep,
               self.typekey: self.foldertype}

        try:
            os.chdir(self.dirname)
            for (dirpath, dirnames, filenames) in os.walk(os.curdir):
                os.chdir(cwd)

                # Convert path from os.path to posixpath
                dirpath = posixpath.join(*pathsplit(dirpath, ospath=os.path))

                def sortkey(basename, dirpath=dirpath, sortkey=self.sortkey):
                    return sortkey({}, dirpath=dirpath, basename=basename)

                for basename in sorted(filenames, key=sortkey):
                    yield {self.pathkey: posixpath.relpath(
                        posixpath.join(posixpath.sep, dirpath, basename),
                        posixpath.sep)}
                for basename in sorted(dirnames, key=sortkey):
                    yield {
                        self.pathkey: posixpath.relpath(
                            posixpath.join(posixpath.sep, dirpath, basename),
                            posixpath.sep) + posixpath.sep,
                        self.typekey: self.foldertype}

                os.chdir(self.dirname)

        finally:
            os.chdir(cwd)
Example #3
    def scan(self):
        if not self.fs:
            self.attachFileSystem(self.basepath)
        
        self._dirs = dict()
        self._files = dict()

        for root, _dirs, _files in self.fs.walk(self.fs.basepath):
            for _dir in _dirs:
                path = os.path.join(root, _dir).replace("\\", "/")
                
                stat = self.fs.stat(path)

                path = posixpath.relpath(path, self.fs.basepath)

                self._dirs[path] = {
                    "size": stat.st_size,
                    "mdate": stat.st_mtime
                }

            for _file in _files:
                path = os.path.join(root, _file).replace("\\", "/")

                stat = self.fs.stat(path)

                path = posixpath.relpath(path, self.fs.basepath)

                self._files[path] = {
                    "size": stat.st_size,
                    "mdate": stat.st_mtime
                }
        
        return self
Example #4
    def run(self):
        # MOE needs to be invoked from the directory containing the SVL scripts, otherwise it can't seem to
        # "find" the other SVL files. It would be nice to eliminate this issue and use absolute paths.
        #lex = shlex.shlex(
        #    'moe -load "{script}" -exec "HomologyBatch [\'{input}\']"'.format(
        #        script=self.args['svl_script_name'],  # The file will be accessed from the parent dir.
        #        # MOE only accepts POSIX-like file paths as SVL function arguments.
        #        input=posixpath.relpath(self.args['input_directory'], start=self.args['svl_directory'])
        #    )
        #)
        #lex.whitespace_split = True
        #process_args = list(lex)
        #check_call(process_args, stdout=PIPE, cwd=self.args['svl_directory'])

        process_args = 'moebatch -run "{script}" -options "{options}" -template "{template}" -sequence "{sequence}" -out "{outDir}"'.format(
            script=self.args['svl_script_name'],  # The file will be accessed from the parent dir.
            options=posixpath.relpath(self.args['homology_options'], start=self.args['svl_directory']),
            template=posixpath.relpath(self.args['template_file'], start=self.args['svl_directory']),
            sequence=posixpath.relpath(self.args['sequence_file'], start=self.args['svl_directory']),
            outDir=posixpath.relpath(self.args['outputDir'], start=self.args['svl_directory'])
        )
        try:
            # This script currently outputs the homology model files in the directory where it was invoked.
            # Call the script from the output directory.
            check_call(process_args, stdout=PIPE, shell=True, cwd=self.args['svl_directory'])
        except CalledProcessError as e:
            # For some reason, moebatch seems to always return 1.
            if e.returncode != 1:  # Ignore a return code of 1.
                raise e
Example #5
 def diff(self, relative = False):
     (d, f) = self.scan()
     ef = f - self.__of
     mf = self.__of - f
     ed = d - self.__od
     md = self.__od - d
     if relative:
         ef = set([posixpath.relpath(x, self.__path) for x in ef])
         mf = set([posixpath.relpath(x, self.__path) for x in mf])
         ed = set([posixpath.relpath(x, self.__path) for x in ed])
         md = set([posixpath.relpath(x, self.__path) for x in md])
     return ed, ef, md, mf
Example #6
def get_jottapath(localtopdir, dirpath, jottamountpoint):
    """Translate localtopdir to jottapath. Returns unicode string"""
    log.debug("get_jottapath %r %r %r", localtopdir, dirpath, jottamountpoint)
    normpath = posixpath.normpath(
        posixpath.join(jottamountpoint, posixpath.basename(localtopdir), posixpath.relpath(dirpath, localtopdir))
    )
    return _decode_filename_to_unicode(normpath)
Example #7
 def get_path_components(self, relative_to=None):
     if relative_to:
         if not isinstance(relative_to, FTPFile):
             raise ValueError("relative_to must be another FTPFile "
                              "instance")
         return posixpath.relpath(self._path, relative_to._path).split("/")
     return self._path.split("/")
Example #8
def recursive_copy(sourcefs, targetfs, sourcepath, targetpath):
    #normalise paths
    norm_sourcepath = posixpath.normpath(sourcepath)
    norm_targetpath = posixpath.normpath(targetpath)
    
    #Create the first directory in the target file system if it does not already exist
    source_end_path = posixpath.split(posixpath.normpath(sourcepath))[1]
   
    #if the target path exists, make a new directory into the target path directory
    if targetfs.exists(norm_targetpath):
        base_target_path = posixpath.normpath(posixpath.join(norm_targetpath, source_end_path)) 
    #if the target does not exist but its parent does, rename the directory and copy
    elif targetfs.exists(posixpath.normpath(posixpath.join(norm_targetpath, ".."))):
        #If it does not exist, create that directory
        base_target_path = norm_targetpath
    else:
        raise IOError("Cannot copy into target: "+targetpath)

    if not targetfs.exists(base_target_path):
        targetfs.mkdir(base_target_path)
              
    for (path, directories, files) in sourcefs.walk(norm_sourcepath):
        rel_source_path = posixpath.relpath(path, norm_sourcepath)
        new_target_path = posixpath.normpath(posixpath.join(base_target_path, rel_source_path))
        print(new_target_path)
        for f in files:
            copy_file_into_directory(sourcefs, targetfs, posixpath.join(path, f), new_target_path)
        for d in directories:
            new_directory = posixpath.join(new_target_path, d)
            if not targetfs.exists(new_directory):
                targetfs.mkdir(new_directory)
Example #9
def get_url(current, target):
    current = urlparse(current).path
    target = urlparse(target).path

    result = posixpath.relpath(target, current).split('/')
    result = '/'.join(result[1:])
    return result
Example #10
def makeNodesRelative(nodes, knobTypes):
    result = { 'warnings': [], 'replacements': [], 'projectFolder': None}
    projectfile = nuke.root()['name'].value() 
    if projectfile =="":
        result['warnings'].append('Please save the nuke script before running this function such that it has a valid path.')
        return result
    projectFolderAbsolute = posixpath.dirname(projectfile)
    result['projectFolder'] = projectFolderAbsolute
    projectFolderRelative = "[file dirname [value root.name]]"

    for n in nodes:
        for k in knobTypes:
            if n.knob(k):
                originalFilePath = n[k].value()
                if n[k].isAnimated():
                    result['warnings'].append("Didn't replace "+k+' of node '+n['name'].value()+' since the knob is animated')
                elif n[k].hasExpression():
                    result['warnings'].append("Didn't replace "+k+' of node '+n['name'].value()+' since the knob has an expression')
                elif originalFilePath.strip()=="":
                    #result['warnings'].append("Didn't replace "+k+' of node '+n['name'].value()+' since it is empty')
                    pass
                elif originalFilePath.startswith(projectFolderRelative): 
                    result['warnings'].append("Didn't replace "+k+' of node '+n['name'].value()+' since it is already a relative path:\n'+ __removePrefix(originalFilePath,projectFolderRelative))
                else:
                    relativeFilePath =  posixpath.relpath(originalFilePath,projectFolderAbsolute)
                    n[k].setValue(projectFolderRelative + '/' +relativeFilePath)
                    result['replacements'].append(k+' of '+ n['name'].value()+':\n'+relativeFilePath)
                        
    return result
Example #11
    def _apply_regex_rule(self, rule_name, rule, cat_name, cat_path, settings):
        accepted_flags = {
            'a': re.ASCII,
            'i': re.IGNORECASE,
            'l': re.LOCALE,
            'x': re.VERBOSE
        }
        flags = sum([accepted_flags[f] for f in rule.get('flags', [])])

        pattern = None
        try:
            pattern = re.compile(rule['pattern'], flags)
        except KeyError:
            raise InvalidRegexFilter(cat_name, rule_name)

        actions = []

        rename = rule.get('rename', None)
        for root, dirs, files in os.walk(self._path):
            if posixpath.abspath(root) == self._repo_path:
                continue

            for file_name in files:
                file_name = posixpath.relpath(posixpath.join(root, file_name), self._path)

                match = pattern.match(file_name)
                if match:
                    new_name = file_name
                    if rename:
                        new_name = rename.format(**match.groupdict())

                    new_name = posixpath.join(cat_path, new_name)
                    actions.append(('mv', posixpath.join(self._path, file_name), new_name))

        return actions
Example #12
    def _get_relative_path(a, b):
        """
        returns a relative path for navigation from dir *a* to dir *b*

        if the common parent of both is "/", return an absolute path
        """
        a += "/"
        b += "/"
        parent = posixpath.dirname(posixpath.commonprefix([a,b]))
        if parent == "/": return b[:-1]

        a = posixpath.relpath(a, parent)
        b = posixpath.relpath(b, parent)
        if a == ".": return b

        return posixpath.normpath("../" * (a.count("/")+1) + b)
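Note that posixpath.commonprefix() compares strings character by character rather than component by component, which is presumably why the helper above appends a slash and passes the result through dirname(). A small sketch with made-up paths:

import posixpath

# The raw common prefix can stop in the middle of a path component...
assert posixpath.commonprefix(['/foo/bar/', '/foo/baz/']) == '/foo/ba'
# ...and dirname() trims it back to an actual directory boundary.
assert posixpath.dirname('/foo/ba') == '/foo'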
Example #13
        def lookup_redirect(url):
            sub_url = url

            for sub_url, _ in Segment(url):
                for base, filename in Segment(sub_url):
                    try:
                        redirects = self._cache.GetFromFile(
                            posixpath.normpath(posixpath.join(base, "redirects.json"))
                        ).Get()
                    except FileNotFoundError:
                        continue

                    redirect = redirects.get(posixpath.join(filename, "..."))

                    if redirect is None:
                        continue

                    redirect = Join(base, redirect.rstrip("..."))

                    # Avoid infinite redirection loops by breaking if seen before.
                    if redirect in seen_redirects:
                        break
                    seen_redirects.add(redirect)
                    return lookup_redirect(Join(redirect, posixpath.relpath(url, sub_url)))
            return url
Example #14
 def make_relative(self, url):
     """
     Given a URL path return it as a relative URL,
     given the context of the current page.
     """
     suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
     return posixpath.relpath(url, start=self.base_path) + suffix
Example #15
def get_relative_url(destination, source):
    """Get relative URL between two sources.

    http://stackoverflow.com/a/7469668/315168

    :param destination:
    :param source:
    :return: tuple (is same domain, relative url)
    """

    u_dest = urlparse.urlsplit(destination)
    u_src = urlparse.urlsplit(source)

    _uc1 = urlparse.urlunsplit(u_dest[:2]+tuple('' for i in range(3)))
    _uc2 = urlparse.urlunsplit(u_src[:2]+tuple('' for i in range(3)))

    if _uc1 != _uc2:
        ## This is a different domain
        return False, destination

    # If there is no / component in url assume it's root path
    src_path = u_src.path or "/"

    _relpath = posixpath.relpath(u_dest.path, posixpath.dirname(src_path))

    return True, _relpath
    # return True, urlparse.urlunsplit(('', '', _relpath, u_dest.query, u_dest.fragment))
Example #16
def path2tag(rootpath, pathname):
    """Convert a pathname to a tagname.

    Normalizes the path before converting to a tagname.

    """
    return '//' + posixpath.relpath(pathname, rootpath)
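A quick sketch with hypothetical paths; since path2tag() above only prefixes the relative path with '//', the expected result follows directly:

import posixpath

assert posixpath.relpath('/photos/2021/cat.jpg', '/photos') == '2021/cat.jpg'
# so path2tag('/photos', '/photos/2021/cat.jpg') would return '//2021/cat.jpg'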
Example #17
 def __repo_for_path(self, path):
     path_prefixes = sorted(self.uri.keys(), key=lambda x: len(x), reverse=True)
     for prefix in path_prefixes:
         if path.startswith(prefix):
             relpath = posixpath.relpath(path, prefix or '.') if path else ''
             return prefix, self.uri[prefix], relpath if relpath != '.' else None
     raise ValueError(("No KnowledgeRepository found for '{}', "
                       "paths must be prefixed with {}.").format(path, path_prefixes))
Example #18
    def listdir(self, name):
        path = self._normalize_name(self._clean_name(name))
        # The path needs to end with a slash, but if the root is empty, leave
        # it.
        if path and not path.endswith('/'):
            path += '/'

        directories = []
        files = []
        paginator = self.connection.meta.client.get_paginator('list_objects')
        pages = paginator.paginate(Bucket=self.bucket_name, Delimiter='/', Prefix=path)
        for page in pages:
            for entry in page.get('CommonPrefixes', ()):
                directories.append(posixpath.relpath(entry['Prefix'], path))
            for entry in page.get('Contents', ()):
                files.append(posixpath.relpath(entry['Key'], path))
        return directories, files
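Here relpath() just strips the listed prefix from the full S3 key names; a minimal sketch with made-up keys:

import posixpath

assert posixpath.relpath('media/photos/2021/', 'media/photos/') == '2021'
assert posixpath.relpath('media/photos/cover.jpg', 'media/photos/') == 'cover.jpg'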
Example #19
 def __target_relpath(self):
     # workaround for posixpath bug in 2.6, doesn't generate correct
     # relative path when *start* (second) parameter is root ('/')
     if self.__baseURI == '/':
         relpath = self.target.partname[1:]
     else:
         relpath = posixpath.relpath(self.target.partname, self.__baseURI)
     return relpath
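The workaround above simply strips the leading slash by hand; on later Python versions posixpath.relpath() handles a root start value directly:

import posixpath

assert posixpath.relpath('/ppt/slides/slide1.xml', '/') == 'ppt/slides/slide1.xml'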
Example #20
def relative_url(target, base):
    base = urlparse.urlparse(base)
    target = urlparse.urlparse(target)
    if base.netloc != target.netloc:
        raise ValueError('target and base netlocs do not match')
    base_dir = '.' + posixpath.dirname(base.path)
    target = '.' + target.path
    return '/' + posixpath.relpath(target, start=base_dir)
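One effect of the '.' prefixes is that an empty URL path becomes '.', avoiding the ValueError that relpath() raises for an empty path; the prefixed paths otherwise resolve as expected. A sketch with hypothetical URL paths:

import posixpath

assert posixpath.relpath('./a/b/page.html', start='./a') == 'b/page.html'
assert posixpath.relpath('.', start='./a') == '..'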
Example #21
def posix_relpath(path, root):
  """posix.relpath() that keeps trailing slash.

  It is different from relpath() since it can be used on Windows.
  """
  out = posixpath.relpath(path, root)
  if path.endswith('/'):
    out += '/'
  return out
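relpath() normalizes its result and drops any trailing slash, which is exactly what the wrapper above compensates for:

import posixpath

assert posixpath.relpath('a/b/c/', 'a') == 'b/c'   # trailing slash is lost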
Example #22
 def relurl(otarget, obase):
     base = urlparse.urlparse(obase)
     target = urlparse.urlparse(otarget)
     if base.netloc != target.netloc:
         # raise ValueError('target and base netlocs do not match')
         return otarget
     base_dir = '.' + posixpath.dirname(base.path)
     target = '.' + target.path
     return posixpath.relpath(target, start=base_dir)
Example #23
def get_relative_url(url, other):
    """
    Return given url relative to other.
    """
    if other != '.':
        # Remove filename from other url if it has one.
        parts = posixpath.split(other)
        other = parts[0] if '.' in parts[1] else other
    relurl = posixpath.relpath(url, other)
    return relurl + '/' if url.endswith('/') else relurl
Example #25
def CalcInputs(inputs):
    """Computes the full list of input files. The output is a list of
  (filepath, filepath relative to input dir)"""
    # |inputs| is a list of paths, which may be directories.
    output = []
    for input_file in inputs:
        file_list = ListFilesForPath(input_file)
        dirname = posixpath.dirname(input_file)
        output.extend([(x, posixpath.relpath(x, dirname)) for x in file_list])
    return output
Example #26
    def _write_standard_egg_info_metadata(self, zip_info):
        if is_zipinfo_dir(zip_info):
            return

        name = zip_info.filename
        from_egg_info = posixpath.relpath(name, EGG_INFO)
        dest = posixpath.join(self.pyloc, setuptools_egg_info_dir(self.path),
                              from_egg_info)

        self._write_egg_info_arcname(name, dest)
Example #27
 def _kp_dir(self, path, parent=None, revision=None):
     ref_prefix = parent + '/' if parent else ''
     revision = revision or self._kp_get_revision(path, enforce_exists=True)
     refs = (self.session.query(self.PostRef.ref)
                         .filter(self.PostRef.path == path)
                         .filter(self.PostRef.ref.like(ref_prefix + '%'))
                         .filter(self.PostRef.revision == revision)).all()
     for (ref,) in refs:
         if ref is not None:
             yield posixpath.relpath(ref, parent or '')
Example #28
 def listdir(self, path):
     path = self._get_key_name(path)
     path = "" if path == "." else path + "/"
     # Look through the paths, parsing out directories and paths.
     files = []
     dirs = []
     paginator = self.s3_connection.get_paginator("list_objects_v2")
     pages = paginator.paginate(
         Bucket=self.settings.AWS_S3_BUCKET_NAME,
         Delimiter="/",
         Prefix=path,
     )
     for page in pages:
         for entry in page.get("Contents", ()):
             files.append(posixpath.relpath(entry["Key"], path))
         for entry in page.get("CommonPrefixes", ()):
             dirs.append(posixpath.relpath(entry["Prefix"], path))
     # All done!
     return dirs, files
Example #31
    def list_artifacts(self, path=None):
        (bucket, artifact_path) = data.parse_s3_uri(self.artifact_uri)
        dest_path = artifact_path
        if path:
            dest_path = posixpath.join(dest_path, path)
        infos = []
        prefix = dest_path + "/" if dest_path else ""
        s3_client = self._get_s3_client()

        list_metadata = s3_client.list_objects_v2(Bucket=bucket,
                                                  Prefix=dest_path,
                                                  MaxKeys=1)
        contents = list_metadata.get('Contents', [])
        if len(contents) == 1 and contents[0].get('Key') == dest_path:
            return []

        paginator = s3_client.get_paginator("list_objects_v2")
        results = paginator.paginate(Bucket=bucket,
                                     Prefix=prefix,
                                     Delimiter='/')
        for result in results:
            # Subdirectories will be listed as "common prefixes" due to the way we made the request
            for obj in result.get("CommonPrefixes", []):
                subdir_path = obj.get("Prefix")
                self._verify_listed_object_contains_artifact_path_prefix(
                    listed_object_path=subdir_path,
                    artifact_path=artifact_path)
                subdir_rel_path = posixpath.relpath(path=subdir_path,
                                                    start=artifact_path)
                if subdir_rel_path.endswith("/"):
                    subdir_rel_path = subdir_rel_path[:-1]
                infos.append(FileInfo(subdir_rel_path, True, None))
            # Objects listed directly will be files
            for obj in result.get('Contents', []):
                file_path = obj.get("Key")
                self._verify_listed_object_contains_artifact_path_prefix(
                    listed_object_path=file_path, artifact_path=artifact_path)
                file_rel_path = posixpath.relpath(path=file_path,
                                                  start=artifact_path)
                file_size = int(obj.get('Size'))
                infos.append(FileInfo(file_rel_path, False, file_size))
        return sorted(infos, key=lambda f: f.path)
Example #32
 def __repo_for_path(self, path):
     path_prefixes = sorted(self.uri.keys(),
                            key=lambda x: len(x),
                            reverse=True)
     for prefix in path_prefixes:
         if path.startswith(prefix):
             relpath = posixpath.relpath(path, prefix
                                         or '.') if path else ''
             return prefix, self.uri[
                 prefix], relpath if relpath != '.' else None
     raise ValueError("No KnowledgeRepository found for '{}'.".format(path))
Example #33
    def read(self, filename):
        source_path = osp.relpath(filename, self.settings['PATH'])
        path = normalize_path(osp.splitext(source_path)[0])
        with pelican_open(filename) as content:
            if content[-1] == '\n':
                content = content[:-1]
            link = content

        # XX Hack: 'precursors' is injected into this module in __init__.py
        precursors.append(LinkNodePrecursor(path, link))
        return None, {}
Example #34
 def _get_shared_path(self, resource: Text, path: Text) -> Optional[Text]:
     for shared_path in self.sharedPaths:
         if path.startswith(shared_path):
             return path
     volumes = self._get_volumes(resource)
     for volume in volumes:
         local, remote = volume.split(':')
         if path.startswith(remote):
             return posixpath.normpath(
                 posixpath.join(local, posixpath.relpath(path, remote)))
     return None
Example #35
        def _scrape_dir(path, dst):
            objs = self.client.list(path)
            for hpath, detail in objs:
                relpath = posixpath.relpath(hpath, hdfs_path)
                full_opath = pjoin(dst, relpath)

                if detail['type'] == 'FILE':
                    _get_file(hpath, full_opath)
                else:
                    os.makedirs(full_opath)
                    _scrape_dir(hpath, dst)
Example #36
def abs_to_rel_url(base_url, target_url):
    # Calculate a relative link from one URL to another.
    # If either URL has no domain, both are assumed to be on the same domain.

    base = urlparse(base_url)
    target = urlparse(target_url)
    if base.netloc != '' and target.netloc != '' and base.netloc != target.netloc:
        raise ValueError('target and base netlocs do not match')
    base_dir = '.' + posixpath.dirname(base.path)
    target = '.' + target.path
    return posixpath.relpath(target, start=base_dir)
Example #37
    def relpath(self, start, prefix=''):
        if self.root == Root.absolute:
            return self.__localize(self.suffix)
        if self.root != start.root:
            raise ValueError('source mismatch')

        rel = posixpath.relpath(self.suffix or posixpath.curdir, start.suffix
                                or posixpath.curdir)
        if prefix and rel == self.curdir:
            return prefix
        return self.__localize(posixpath.join(prefix, rel))
Example #38
 def listdir(self, name):
     path = self._normalize_name(self._clean_name(name))
     if path and not path.endswith('/'):
         path += '/'
     directories = []
     files = []
     paginator = self.connection.meta.client.get_paginator('list_objects')
     pages = paginator.paginate(Bucket=self.bucket_name,
                                Delimiter='/',
                                Prefix=path)
     for page in pages:
         for entry in page.get('CommonPrefixes', ()):
             directories.append(posixpath.relpath(entry['Prefix'], path))
         for entry in page.get('Contents', ()):
             files.append({
                 'Key': posixpath.relpath(entry['Key'], path),
                 'LastModified': entry['LastModified'],
                 'Size': entry['Size']
             })
     return directories, files
Example #39
def test_context_tar_gz_with_fire():
    preprocessor = ConvertNotebookPreprocessorWithFire(notebook_file=NOTEBOOK_PATH)
    context_file, _ = preprocessor.context_tar_gz()
    tar = tarfile.open(context_file)
    relative_path_prefix = posixpath.relpath(DEFAULT_DEST_PREFIX, "/")
    converted_notebook_path = posixpath.join(os.path.dirname(NOTEBOOK_PATH), os.path.basename(preprocessor.executable))
    notebook_context_path = posixpath.join(relative_path_prefix, converted_notebook_path)
    tar_notebook = tar.extractfile(tar.getmember(notebook_context_path))
    tar_notebook_text = tar_notebook.read().decode()
    os.remove(converted_notebook_path)
    assert "fire.Fire(None)" in tar_notebook_text
Example #40
    def make_relative(self, url):
        """
        Return the given absolute URL as a path relative to base_path.
        """
        if url.startswith('/'):
            base_path = '/' + self.base_path.lstrip('/')
            relative_path = posixpath.relpath(url, start=base_path)
        else:  #it is relative url already
            relative_path = url

        return relative_path
Example #41
 def backup(self):
     """Download all data from /flash"""
     backup_dir = 'backup_{:%Y-%m-%d_%H_%M_%S}'.format(datetime.datetime.now())
     logging.info('backing up /flash into {}'.format(backup_dir))
     for root, dirs, files in self.target.walk('/flash'):
         local_root = os.path.join(backup_dir, posixpath.relpath(root, '/'))
         if not os.path.exists(local_root):
             os.makedirs(local_root)
         for name in files:
             with open(os.path.join(local_root, name), 'wb') as dst:
                 self.target.get(posixpath.join(root, name), dst)
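relpath(root, '/') turns the absolute paths reported by the device walk into paths relative to its root, so the os.path.join() calls above keep everything inside backup_dir; for example:

import posixpath

assert posixpath.relpath('/flash/logs', '/') == 'flash/logs'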
Example #43
 def list_artifacts(self, path=None):
     from azure.storage.blob.models import BlobPrefix
     (container, _, artifact_path) = self.parse_wasbs_uri(self.artifact_uri)
     dest_path = artifact_path
     if path:
         dest_path = posixpath.join(dest_path, path)
     infos = []
     prefix = dest_path + "/"
     marker = None  # Used to make next list request if this one exceeded the result limit
     while True:
         results = self.client.list_blobs(container,
                                          prefix=prefix,
                                          delimiter='/',
                                          marker=marker)
         for r in results:
             if not r.name.startswith(artifact_path):
                 raise MlflowException(
                     "The name of the listed Azure blob does not begin with the specified"
                     " artifact path. Artifact path: {artifact_path}. Blob name:"
                     " {blob_name}".format(artifact_path=artifact_path,
                                           blob_name=r.name))
             if isinstance(
                     r, BlobPrefix
             ):  # This is a prefix for items in a subdirectory
                 subdir = posixpath.relpath(path=r.name,
                                            start=artifact_path)
                 if subdir.endswith("/"):
                     subdir = subdir[:-1]
                 infos.append(FileInfo(subdir, True, None))
             else:  # Just a plain old blob
                 file_name = posixpath.relpath(path=r.name,
                                               start=artifact_path)
                 infos.append(
                     FileInfo(file_name, False,
                              r.properties.content_length))
         # Check whether a new marker is returned, meaning we have to make another request
         if results.next_marker:
             marker = results.next_marker
         else:
             break
     return sorted(infos, key=lambda f: f.path)
Example #44
    def list_artifacts(self, path=None):
        # Newer versions of `azure-storage-blob` (>= 12.4.0) provide a public
        # `azure.storage.blob.BlobPrefix` object to signify that a blob is a directory,
        # while older versions only expose this API internally as
        # `azure.storage.blob._models.BlobPrefix`
        try:
            from azure.storage.blob import BlobPrefix
        except ImportError:
            from azure.storage.blob._models import BlobPrefix

        (container, _, artifact_path) = self.parse_wasbs_uri(self.artifact_uri)
        container_client = self.client.get_container_client(container)
        dest_path = artifact_path
        if path:
            dest_path = posixpath.join(dest_path, path)
        infos = []
        prefix = dest_path if dest_path.endswith("/") else dest_path + "/"
        results = container_client.walk_blobs(name_starts_with=prefix)
        for r in results:
            if not r.name.startswith(artifact_path):
                raise MlflowException(
                    "The name of the listed Azure blob does not begin with the specified"
                    " artifact path. Artifact path: {artifact_path}. Blob name:"
                    " {blob_name}".format(artifact_path=artifact_path,
                                          blob_name=r.name))
            if isinstance(r, BlobPrefix
                          ):  # This is a prefix for items in a subdirectory
                subdir = posixpath.relpath(path=r.name, start=artifact_path)
                if subdir.endswith("/"):
                    subdir = subdir[:-1]
                infos.append(FileInfo(subdir, True, None))
            else:  # Just a plain old blob
                file_name = posixpath.relpath(path=r.name, start=artifact_path)
                infos.append(FileInfo(file_name, False, r.size))
        # The list_artifacts API expects us to return an empty list if
        # the path references a single file.
        rel_path = dest_path[len(artifact_path) + 1:]
        if (len(infos) == 1) and not infos[0].is_dir and (infos[0].path
                                                          == rel_path):
            return []
        return sorted(infos, key=lambda f: f.path)
Example #46
def relative_url_for(endpoint, **values):
    """Like url_for, but generates relative paths for each request."""
    print("relative_url_for")
    url = url_for(endpoint, **values)
    if not url.startswith('/'):
        return url

    request_path = request.path
    if not request_path.endswith('/'):
        request_path = posixpath.dirname(request_path)

    return posixpath.relpath(url, request_path)
Example #47
def GetOutputs(files_to_convert, output_basedir):
    """Returns a list of filenames relative to the output directory,
  based on a list of input files."""
    outputs = []
    for filename in files_to_convert:
        dirname = posixpath.dirname(filename)
        relative_filename = posixpath.relpath(filename, dirname)
        relative_filename = ChangeSuffix(relative_filename, 'csv')
        output_filename = posixpath.join(output_basedir, relative_filename)
        outputs.append(output_filename)

    return outputs
Example #48
def relpath_to_site(lang, target_lang):
    '''Get relative path from siteurl of lang to siteurl of base_lang
    the output is cached in _SITES_RELPATH_DB
    '''
    path = _SITES_RELPATH_DB.get((lang, target_lang), None)
    if path is None:
        siteurl = _SITE_DB.get(lang, _MAIN_SITEURL)
        target_siteurl = _SITE_DB.get(target_lang, _MAIN_SITEURL)
        path = posixpath.relpath(get_site_path(target_siteurl),
                                 get_site_path(siteurl))
        _SITES_RELPATH_DB[(lang, target_lang)] = path
    return path
Example #50
def test_filter_include_cell():
    preprocessor = ConvertNotebookPreprocessor(notebook_file=NOTEBOOK_PATH,
                                               notebook_preprocessor=FilterIncludeCell)
    context_file, _ = preprocessor.context_tar_gz()
    tar = tarfile.open(context_file)
    relative_path_prefix = posixpath.relpath(DEFAULT_DEST_PREFIX, "/")
    converted_notebook_path = posixpath.join(os.path.dirname(NOTEBOOK_PATH), os.path.basename(preprocessor.executable))
    notebook_context_path = posixpath.join(relative_path_prefix, converted_notebook_path)
    tar_notebook = tar.extractfile(tar.getmember(notebook_context_path))
    tar_notebook_text = tar_notebook.read().decode()
    os.remove(converted_notebook_path)
    assert "print('This cell includes fairing:include-cell')" in tar_notebook_text
Example #51
 def getRelativeUrl(self, path, url):
   import urllib.parse
   import posixpath
   u_dest = urllib.parse.urlsplit(url)
   u_src = urllib.parse.urlsplit(path)
   _uc1 = urllib.parse.urlunsplit(u_dest[:2]+tuple('' for i in range(3)))
   _uc2 = urllib.parse.urlunsplit(u_src[:2]+tuple('' for i in range(3)))
   if _uc1 != _uc2:
       ## This is a different domain
       return url
   _relpath = posixpath.relpath(u_dest.path, posixpath.dirname(u_src.path))
   return './%s'%urllib.parse.urlunsplit(('', '', _relpath, u_dest.query, u_dest.fragment))
Example #52
    def name_to_href(self, name, base):
        """Changed to ensure that blank href names are referenced as the
        empty string instead of '.'.

        Taken from the calibre Modify Epub plugin's Container implementation.
        """
        if not base:
            return name
        href = posixpath.relpath(name, base)
        if href == '.':
            href = ''
        return href
Example #53
  def url_to(self, target, source=None, isfile=True):
    if source is None:
      if self.current_url is None:
        raise RuntimeError('Context.current_url is not set.')
      source = self.current_url

    source = self.real_url(source, isfile)
    target = self.real_url(target, isfile)
    res = posixpath.relpath(target, posixpath.dirname(source))
    if self.config['statigen.urlFormat'] == 'directory' and source != '/':
      res = '../' + res
    return res
Example #54
def _join(base_url, path, *extra, **kwargs):
    base_url = parse(base_url)
    resolve_href = kwargs.get('resolve_href', False)

    (scheme, netloc, base_path, params, query, _) = base_url
    scheme = scheme.lower()

    path_tokens = [
        part for part in itertools.chain(
            _split_all(path),
            itertools.chain.from_iterable(
                _split_all(extra_path) for extra_path in extra))
        if part and part != '/'
    ]

    base_path_args = ['/fake-root']
    if scheme == 's3':
        if netloc:
            base_path_args.append(netloc)

    if base_path.startswith('/'):
        base_path = base_path[1:]

    base_path_args.append(base_path)

    if resolve_href:
        new_base_path, _ = posixpath.split(posixpath.join(*base_path_args))
        base_path_args = [new_base_path]

    base_path_args.extend(path_tokens)
    base_path = posixpath.relpath(posixpath.join(*base_path_args),
                                  '/fake-root')

    if scheme == 's3':
        path_tokens = [
            part for part in _split_all(base_path) if part and part != '/'
        ]

        if path_tokens:
            netloc = path_tokens.pop(0)
            base_path = posixpath.join('', *path_tokens)

    if sys.platform == "win32":
        base_path = convert_to_posix_path(base_path)

    return format(
        urllib_parse.ParseResult(scheme=scheme,
                                 netloc=netloc,
                                 path=base_path,
                                 params=params,
                                 query=query,
                                 fragment=None))
Example #56
 def load_file_stats(self, file_name):
     with open(file_name) as f:
         for line in f:
             line = line.strip()
             elems = line.split(',')
             pid = int(elems[0])
             fid = int(elems[1])
             t = self.find_tuple(pid, fid)
             p = elems[2].strip('"')
             if self.project_path_dict.get(pid) is not None:
                 project_path = self.project_path_dict[pid]
                 p = posixpath.relpath(p, project_path)
             t.add_path(p)
Example #57
 def relative_ref(self, baseURI):
     """
     Return string containing relative reference to package item from
     *baseURI*. E.g. PackURI('/ppt/slideLayouts/slideLayout1.xml') would
     return '../slideLayouts/slideLayout1.xml' for baseURI '/ppt/slides'.
     """
     # workaround for posixpath bug in 2.6, doesn't generate correct
     # relative path when *start* (second) parameter is root ('/')
     if baseURI == '/':
         relpath = self[1:]
     else:
         relpath = posixpath.relpath(self, baseURI)
     return relpath
Example #58
    def _get_project_from_path(full_path):
        """
        Split the absolute path into root_path and project_path using the top_path function in Settings().

        Args:
            full_path (str): absolute path

        Returns:
            str, str: root_path, project_path
        """
        root = Settings().top_path(full_path)
        pr_path = posixpath.relpath(full_path, root)
        return root, pr_path
Example #59
def relative_links(root):
    for path in pathlib.Path(root).rglob('*'):
        if path.is_symlink():
            link_target = os.readlink(str(path))
            if link_target[0] != '/':
                continue
            if link_target.startswith(str(root)):
                continue
            path.unlink()
            new_target = posixpath.relpath(root / link_target[1:],
                                           start=path.parent)
            # print(path, ':', link_target, '->', new_target)
            os.symlink(new_target, str(path))
Example #60
    def _rewrite_url(self, css_url, asset_url):
        """
        Pulled from:
        http://stackoverflow.com/questions/7469573/how-to-construct-relative-url-given-two-absolute-urls-in-python

        """
        base = urlparse(css_url)
        target = urlparse(asset_url)
        if base.netloc != target.netloc:
            return asset_url
        base_dir = '.' + posixpath.dirname(base.path)
        target = '.' + target.path
        return posixpath.relpath(target, start=base_dir)