Example #1
def remove_backslashes_and_dotdots(directory):
    """
    Walk a directory and rename the files if their names contain backslashes.
    Return a list of errors if any.
    """
    if on_linux:
        directory = path_to_bytes(directory)
    errors = []
    for top, _, files in os.walk(directory):
        for filename in files:
            if not (WIN_PATH_SEP in filename or DOTDOT in filename):
                continue
            try:
                new_path = fileutils.as_posixpath(filename)
                new_path = new_path.strip(POSIX_PATH_SEP)
                new_path = posixpath.normpath(new_path)
                new_path = new_path.replace(DOTDOT, POSIX_PATH_SEP)
                new_path = new_path.strip(POSIX_PATH_SEP)
                new_path = posixpath.normpath(new_path)
                segments = new_path.split(POSIX_PATH_SEP)
                directory = os.path.join(top, *segments[:-1])
                fileutils.create_dir(directory)
                shutil.move(os.path.join(top, filename), os.path.join(top, *segments))
            except Exception:
                errors.append(os.path.join(top, filename))
    return errors
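
A quick, self-contained sketch of the same normalization chain applied to a single name (assuming WIN_PATH_SEP, DOTDOT and POSIX_PATH_SEP are '\\', '..' and '/', and using str.replace as a stand-in for fileutils.as_posixpath):

import posixpath

WIN_PATH_SEP, DOTDOT, POSIX_PATH_SEP = '\\', '..', '/'

def sanitize(filename):
    # backslashes become '/', then any '..' segments are squeezed out
    new_path = filename.replace(WIN_PATH_SEP, POSIX_PATH_SEP)
    new_path = posixpath.normpath(new_path.strip(POSIX_PATH_SEP))
    new_path = new_path.replace(DOTDOT, POSIX_PATH_SEP)
    return posixpath.normpath(new_path.strip(POSIX_PATH_SEP))

print(sanitize('..\\..\\etc\\passwd'))  # etc/passwd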
Example #2
    def __make_urls(self, repoline):
        """The same as above, but only for one line"""
        match = re.match(r"(?P<repo_type>deb|deb-src)\s+(?P<base_url>[\S]+?)/?\s+((?P<simple_repo>[\S]*?/)|(?P<repo>\S*?[^/\s])(?:\s+(?P<sections>[^/]+?)))\s*$", repoline)
        if not match:
            raise AptRepoException("Unable to parse: %s" % repoline)

        url_bins = []
        url_srcs = []
        repo_type = match.group("repo_type")
        if match.group("simple_repo"):
            if repo_type == "deb":
                __path = posixpath.normpath(posixpath.join("./" + match.group("simple_repo"), "Packages"))
                url_bins = [(posixpath.join(match.group("base_url"), __path), match.group("simple_repo"), '')]
            elif repo_type == "deb-src":
                __path = posixpath.normpath(posixpath.join("./" + match.group("simple_repo"), "Sources"))
                url_srcs = [(posixpath.join(match.group("base_url"), __path), match.group("simple_repo"), '')]
            else:
                raise AptRepoException("Unknown repository type: %s" % repo_type)
        else:
            if repo_type == "deb":
                for item in re.split(r"\s+", match.group("sections")):
                    for arch in self._arch:
                        url_bins.append((posixpath.join(match.group("base_url"), "dists", match.group("repo"), item, "binary-%s/Packages" % arch), match.group("repo"), item))
            elif repo_type == "deb-src":
                for item in match.group("sections").split():
                    url_srcs.append((posixpath.join(match.group("base_url"), "dists", match.group("repo"), item, "source/Sources"), match.group("repo"), item))
            else:
                raise AptRepoException("Unknown repository type: %s" % repo_type)
        return (match.group("base_url"), url_srcs, url_bins)
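Example #3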
def download_hash_list(config, config_filename, server):
    """
	Downloads file with indexes of file found in remote vault and return this list
	@param config: configparser object
	@param server: easywebdav object
	"""
    config.read(config_filename)
    save_loc = "."
    rem_list = config.get("RemoteFolders", "list_dir")
    # given this code, the remote index file must be called checksums.txt; @todo: make this smarter
    # remote = always posix
    remote_checksumfile = posixpath.normpath(posixpath.join(rem_list, "checksums.txt"))
    local_checksumfile = path.join(save_loc, "checksums.txt")
    # local can be 'nt' or 'posix', let's normalise stuff
    if myos == "nt":
        local_checksumfile = ntpath.normpath(local_checksumfile)
    elif myos == "posix":
        local_checksumfile = posixpath.normpath(local_checksumfile)
    server.download(remote_checksumfile, local_checksumfile)
    # curr_file = open(path.join(save_loc,'hashlist.txt'))
    with open(local_checksumfile, "r") as curr_file:
        download_list = curr_file.readlines()
        if DEBUG:
            print(download_list)
    download_list = [i.split() for i in download_list if not i[0] == "#"]  # remove comments from list and split string
    md5_list = [i for i in download_list if "md" in i[0]]
    sha256_list = [i for i in download_list if "sha" in i[0]]
    return download_list, md5_list, sha256_list
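Example #4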
	def handle(self):
		self.data = self.request.recv(1024).strip()
		request_list = self.data.split()
		path = posixpath.normpath(request_list[1])
		path = path[1:]
		if path == '':
			path = 'index.html'
		if os.path.isdir(path):
			path = path + '/index.html'
		path=posixpath.normpath(path)
		print path
		if os.access(path, os.R_OK) is False:
			self.request.sendall("HTTP/1.1 404 NOT FOUND\r\n")
			self.send_header('Date', self.date_time_string())
			self.send_header('Content-Type',"text/plain; charset=UTF-8")
			self.send_header('Connection','closed')
			self.end_header()
			self.request.sendall(DEFAULT_ERROR_MESSAGE)
		if os.access(path, os.R_OK) is True:
			name,extend = os.path.splitext(path)
			try:
				with open(path,'r') as f:
					buffer = f.read()
					self.request.sendall("HTTP/1.1 200 OK\r\n")
					self.send_header('Date', self.date_time_string())
					self.send_header('Content-Type',"text/%s"%(extend[1:]))
					self.send_header('Connection','closed')
					self.end_header()
					self.request.sendall(buffer)
			except IOError as e:
				print "I/O error({0}): {1}".format(e.errno, e.strerror)
		print("Got a request of: %s\n" % self.data)
Example #5
    def locate_imported_file(self, source_dir, import_path):
        """ Locate the imported file in the source directory.
            Return the path to the imported file relative to STATIC_ROOT

        :param source_dir: source directory
        :type source_dir: str
        :param import_path: path to the imported file
        :type import_path: str
        :returns: str

        """
        if not import_path.endswith(self.EXTENSION):
            import_path += self.EXTENSION
        path = posixpath.normpath(posixpath.join(source_dir, import_path))

        try:
            self.get_full_source_path(path)
            return path
        except ValueError:
            pass

        filename = posixpath.basename(import_path)
        if filename[0] != "_":
            path = posixpath.normpath(posixpath.join(
                source_dir,
                posixpath.dirname(import_path),
                "_" + filename,
            ))

        try:
            self.get_full_source_path(path)
            return path
        except ValueError:
            pass
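        # note: unlike the fuller variant in Example #25 below, this method
        # falls through and implicitly returns None when neither candidate resolves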
Example #6
def update_config_file(dryrun=False):
    for site, site_config in config.sites.items():
        redis_processes = [(x, site_config.processes[x]) for x in site_config.processes if site_config.processes[x]["type"] == "redis"]
        template_path = site_config['redis']['template']
        print redis_processes
        for process_name, process in redis_processes:
            working_directory = posixpath.normpath(posixpath.join(env.path, '..', 'data', 'redis', process_name))
            log_directory = posixpath.normpath(posixpath.join(site_config['deployment']['logdir'], 'log', 'redis'))
            run('mkdir -p ' + working_directory)
            run('mkdir -p ' + log_directory)
            context_dict = site_config
            context_dict.update({
                'site': site,
                'working_directory': working_directory,
                'log_directory': log_directory,
                'process_name': process_name,
                'socket': process['socket'],
            })
            path = posixpath.abspath(posixpath.join(site_config['deployment']['path'], '..', 'config', process_name + '.conf'))
            output = render_template(template_path, context_dict)
            if dryrun:
                print path + ":"
                print output
            else:
                put(StringIO.StringIO(output), path)
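Example #7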
    def execute(self):
        productionLocation = self.productionDetails[indexer.INDEX_PRODUCTION_LOCATION]
        fileLocation = self.fileDetails[indexer.INDEX_FILE_LOCATION]
        fileLocation = os.path.join(productionLocation, fileLocation)
        subDirs = fileLocation.replace(self.sourceDirectory, self.targetDirectory, 1)
        fileLocationDir = os.path.dirname(subDirs)
        absRefLoc = os.path.normcase(posixpath.normpath(os.path.join(self.productionDetails[2], subDirs)))
        
        absNewLoc = os.path.normcase(posixpath.normpath(os.path.join(self.productionDetails[2], self.referenceFileDetails[indexer.INDEX_FILE_LOCATION])))
        newpath = "//"+_relpath(absNewLoc, fileLocationDir)
        handle = blendfile.openBlendFile(fileLocation, 'r+b')
        for libraryblock in handle.FindBlendFileBlocksWithCode("LI"):
            
            relPath = libraryblock.Get("name").split("\0")[0].replace("\\", "/")
            absPath = blendfile.blendPath2AbsolutePath(fileLocation, relPath)
            normPath = os.path.normpath(absPath)
            if normPath==absNewLoc:
                libraryblock.Set("name", newpath)

        for imageblock in handle.FindBlendFileBlocksWithCode("IM"):
            
            relPath = imageblock.Get("name").split("\0")[0].replace("\\", "/")
            absPath = blendfile.blendPath2AbsolutePath(fileLocation, relPath)
            normPath = os.path.normpath(absPath)
            if normPath==absNewLoc:
                imageblock.Set("name", newpath)
            
        handle.close()
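Example #8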
    def execute(self):
        productionLocation = self.productionDetails[2]
        fileLocation = self.fileDetails[3]
        fileLocation = os.path.join(productionLocation, fileLocation)
        fileLocationDir = os.path.dirname(fileLocation)
        absRefLoc = os.path.normcase(posixpath.normpath(os.path.join(self.productionDetails[2], self.currentFileLocation)))
        
        absNewLoc = os.path.normcase(posixpath.normpath(os.path.join(os.path.join(self.productionDetails[2], self.newLocation), self.currentFilename)))
        newpath = "//"+_relpath(absNewLoc, fileLocationDir)
        handle = blendfile.openBlendFile(fileLocation, 'r+b')
        for libraryblock in handle.FindBlendFileBlocksWithCode("LI"):
            
            relPath = libraryblock.Get("name").split("\0")[0].replace("\\", "/")
            absPath = blendfile.blendPath2AbsolutePath(fileLocation, relPath)
            normPath = os.path.normpath(absPath)
            if normPath==absRefLoc:
                libraryblock.Set("name", newpath)

        for imageblock in handle.FindBlendFileBlocksWithCode("IM"):
            
            relPath = imageblock.Get("name").split("\0")[0].replace("\\", "/")
            absPath = blendfile.blendPath2AbsolutePath(fileLocation, relPath)
            normPath = os.path.normpath(absPath)
            if normPath==absRefLoc:
                imageblock.Set("name", newpath)
            
        handle.close()
Example #9
def update_config_file(dryrun=False):
    for site, site_config in config.sites.items():
        redis_processes = [
            (x, site_config.processes[x]) for x in site_config.processes if site_config.processes[x]["type"] == "redis"
        ]
        template_path = site_config["redis"]["template"]
        print redis_processes
        for process_name, process in redis_processes:
            working_directory = posixpath.normpath(posixpath.join(env.path, "..", "data", "redis", process_name))
            log_directory = posixpath.normpath(posixpath.join(env.path, "..", "log", "redis"))
            run("mkdir -p " + working_directory)
            run("mkdir -p " + log_directory)
            context_dict = site_config
            context_dict.update(
                {
                    "site": site,
                    "working_directory": working_directory,
                    "log_directory": log_directory,
                    "process_name": process_name,
                    "socket": process["socket"],
                }
            )
            path = posixpath.abspath(
                posixpath.join(site_config["deployment"]["path"], "..", "config", process_name + ".conf")
            )
            output = render_template(template_path, context_dict)
            if dryrun:
                print path + ":"
                print output
            else:
                put(StringIO.StringIO(output), path)
Example #10
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if hasattr(repo, '_subparent'):
        source = repo._subsource
        if source.startswith('/') or '://' in source:
            return source
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            if '://' in parent:
                if parent[-1] == '/':
                    parent = parent[:-1]
                r = urlparse.urlparse(parent + '/' + source)
                r = urlparse.urlunparse((r[0], r[1],
                                         posixpath.normpath(r[2]),
                                         r[3], r[4], r[5]))
                return r
            else: # plain file system path
                return posixpath.normpath(os.path.join(parent, repo._subsource))
    else: # recursion reached top repo
        if hasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
    if abort:
        raise util.Abort(_("default path for subrepository %s not found") %
            reporelpath(repo))
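
The URL branch above relies on posixpath.normpath to collapse '..' inside the path component only, leaving scheme and host untouched; a standalone sketch with hypothetical paths (Python 3 spelling of the urlparse module):

import posixpath
from urllib.parse import urlparse, urlunparse

parent = 'https://hg.example.org/repo'  # hypothetical parent pull path
source = '../other'                     # hypothetical .hgsub source
r = urlparse(parent + '/' + source)
r = urlunparse((r[0], r[1], posixpath.normpath(r[2]), r[3], r[4], r[5]))
print(r)  # https://hg.example.org/other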
Example #11
    def report_path(self, path):
        new_path = path.replace(PathConstants.PACKAGE_BINARY, "${PROJECT_BUILD_DIR}")
        new_path = new_path.replace(PathConstants.CATKIN_DEVEL, "${CATKIN_DEVEL_PREFIX}")
        new_path = new_path.replace(PathConstants.CATKIN_INSTALL, "${CATKIN_INSTALL_PREFIX}")
        if new_path.startswith(PathConstants.PACKAGE_SOURCE):
            return posixpath.normpath(path[len(PathConstants.PACKAGE_SOURCE) + 1:])
        return posixpath.normpath(new_path)
Example #12
File: base.py Project: digantasahoo/nova
    def lookup(self, path):
        if path == "" or path[0] != "/":
            path = posixpath.normpath("/" + path)
        else:
            path = posixpath.normpath(path)

        # fix up requests, prepending /ec2 to anything that does not match
        path_tokens = path.split('/')[1:]
        if path_tokens[0] not in ("ec2", "openstack"):
            if path_tokens[0] == "":
                # request for /
                path_tokens = ["ec2"]
            else:
                path_tokens = ["ec2"] + path_tokens
            path = "/" + "/".join(path_tokens)

        # all values of the 'path' input start with '/' and have no trailing '/'

        # specifically handle the top level request
        if len(path_tokens) == 1:
            if path_tokens[0] == "openstack":
                versions = OPENSTACK_VERSIONS + ["latest"]
            else:
                versions = VERSIONS + ["latest"]
            return versions

        try:
            if path_tokens[0] == "openstack":
                data = self.get_openstack_item(path_tokens[1:])
            else:
                data = self.get_ec2_item(path_tokens[1:])
        except (InvalidMetadataVersion, KeyError):
            raise InvalidMetadataPath(path)

        return data
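
The guard at the top guarantees every lookup path gets a leading '/' and loses any trailing slash; a quick illustration of that normalization:

import posixpath

for raw in ('', 'latest/meta-data/', '/openstack/latest'):
    fixed = posixpath.normpath('/' + raw) if (raw == '' or raw[0] != '/') else posixpath.normpath(raw)
    print(repr(fixed))
# '/' then '/latest/meta-data' then '/openstack/latest'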
Example #13
def compile_templates(root, output, minify, input_encoding='utf-8', watch=True):
    """Compile all templates in root or root's subfolders."""
    root = posixpath.normpath(root)
    root_len = len(root)
    output = posixpath.normpath(output)
    for dirpath, dirnames, filenames in os.walk(root):
        dirpath = posixpath.normpath(dirpath)
        if posixpath.basename(dirpath).startswith('.'): continue
        filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.endswith('.py') and not f.endswith('.pyc')]
        outdir = posixpath.join(output , dirpath[root_len:])
        if not posixpath.exists(outdir):
            os.makedirs(outdir)
        if not posixpath.exists(posixpath.join(outdir, '__init__.py')):
            out = open(posixpath.join(outdir, '__init__.py'), 'w')
            out.close()
        for f in filenames:
            path = posixpath.join(dirpath, f).replace('\\','/')
            outfile = posixpath.join(outdir, f.replace('.','_')+'.py')
            filemtime = os.stat(path)[stat.ST_MTIME]
            if not exists(outfile) or os.stat(outfile)[stat.ST_MTIME] < filemtime:
                uri = path[root_len+1:]
                print 'compiling', uri
                text = file(path).read()
                if minify:
                    text = minify_js_in_html(uri, text)
                t = mako.template.Template(text=text, filename=path, uri=uri, input_encoding=input_encoding)
                out = open(outfile, 'w')
                out.write( t.code)
                out.close()
        if watch:
            watch_folder_for_changes(dirpath, minify)
Example #14
    def normalizeUrl(self, url, base=None):
        if url and not (isHttpUrl(url) or os.path.isabs(url)):
            if base is not None and not isHttpUrl(base) and u'%' in url:
                url = unquote(url)
            if base:
                if isHttpUrl(base):
                    scheme, sep, path = base.partition(u"://")
                    normedPath = scheme + sep + posixpath.normpath(os.path.dirname(path) + u"/" + url)
                else:
                    if u'%' in base:
                        base = unquote(base)
                    normedPath = os.path.normpath(os.path.join(os.path.dirname(base), url))
            else:  # includes base == '' (for forcing relative path)
                normedPath = url
            if normedPath.startswith(u"file://"):
                normedPath = normedPath[7:]
            elif normedPath.startswith(u"file:\\"):
                normedPath = normedPath[6:]

            # no base, not normalized, must be relative to current working directory
            if base is None and not os.path.isabs(url):
                normedPath = os.path.abspath(normedPath)
        else:
            normedPath = url

        if normedPath:
            if isHttpUrl(normedPath):
                scheme, sep, pathpart = normedPath.partition(u"://")
                pathpart = pathpart.replace(u'\\', u'/')
                endingSep = u'/' if pathpart[-1] == u'/' else u''  # normpath drops ending directory separator
                return scheme + u"://" + posixpath.normpath(pathpart) + endingSep
            normedPath = os.path.normpath(normedPath)
            if normedPath.startswith(self.cacheDir):
                normedPath = self.cacheFilepathToUrl(normedPath)
        return normedPath
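
The endingSep trick matters because posixpath.normpath drops a trailing slash; in isolation:

import posixpath

pathpart = 'example.com/a/b/../c/'
endingSep = '/' if pathpart[-1] == '/' else ''
print('http://' + posixpath.normpath(pathpart) + endingSep)  # http://example.com/a/c/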
Example #15
def rewrite_file(filename):
    with open(filename) as f:
        old_file = f.read().splitlines()

    new_file = []
    deferred_imports = {}
    lineiter = iter(old_file)
    for line in lineiter:
        # rewrite from imports
        match = _from_import_re.search(line)
        if match is not None:
            fromlist = line[match.end():]
            new_file.extend(rewrite_from_imports(fromlist,
                                                 match.group(1),
                                                 lineiter))
            continue
        # rewrite attribute access to 'werkzeug'
        def _handle_match(match):
            attr = match.group(2)
            mod = find_module(attr)
            if mod == 'werkzeug':
                return match.group(0)
            deferred_imports.setdefault(mod, []).append(attr)
            return attr
        new_file.append(_direct_usage.sub(_handle_match, line))
    if deferred_imports:
        inject_imports(new_file, deferred_imports)

    for line in difflib.unified_diff(old_file, new_file,
                     posixpath.normpath(posixpath.join('a', filename)),
                     posixpath.normpath(posixpath.join('b', filename)),
                     lineterm=''):
        print line
Example #16
    def get_template(self, uri, module=None):
        """Return a :class:`.Template` object corresponding to the given
        URL.

        Note the "relativeto" argument is not supported here at the moment.

        """

        try:
            if self.filesystem_checks:
                return self._check(uri, self._collection[uri])
            else:
                return self._collection[uri]
        except KeyError:

            if uri[0] == "/" and os.path.isfile(uri):
                return self._load(uri, uri)

            # Case 1: Used with Template.forModule
            if module != None and hasattr(module, "_dir"):
                srcfile = posixpath.normpath(posixpath.join(module._dir, uri))
                if os.path.isfile(srcfile):
                    return self._load(srcfile, srcfile)

            # Case 2: We look through the dirs in the TemplateLookup
            u = re.sub(r"^\/+", "", uri)
            for dir in self.directories:
                srcfile = posixpath.normpath(posixpath.join(dir, u))
                if os.path.isfile(srcfile):
                    return self._load(srcfile, uri)
            else:
                # We did not find anything, so we raise an exception
                raise exceptions.TopLevelLookupException("Can't locate template for uri {0!r}".format(uri))
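Example #17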
def gen_webv_fake_data(expdic=settings.CREATE_D, type_exp='chantigap', 
						pseudotype_dic=settings.PSEUDO_D, id='MAC07', 
						wave_dic=None, outpath='TEST', iswin=False):
	""" Generates a WEPV folder with data of the type/kind/size specs anticipated.
	Files in the WEPV folder are made up of random bytes.
	@todo: fsep/code placement, generic settings, os. selfchecks
	"""
	# generate time and date strings (based upon dt.datetime.now()
	datestr, timestr = fake_time()
	# check what kind of pseudo we need (B or A)
	pseudo_pre = pseudotype_dic[type_exp]
	pseudo = fake_pseudo(ptype=pseudo_pre)
	twave = fake_wave(dic=wave_dic, experiment=type_exp)
	wepvstring = (pseudo + '_' + twave + '_' + type_exp + '_' + datestr + '_' 
					+ timestr + '_' + id)
	# now let's create that dir; normalise the output path first
	if iswin:
		outpath = ntpath.normpath(outpath)
	else:
		outpath = posixpath.normpath(outpath)
	if not os.path.exists(outpath + os.path.sep + wepvstring):
		try:
			os.mkdir(outpath + os.path.sep + wepvstring)
		except OSError as exc:
			if exc.errno != errno.EEXIST:
				raise exc
		pass
		print (outpath + os.path.sep + wepvstring + " was created")
	else:
		print (outpath + os.path.sep + wepvstring + " already exists")
	subd = expdic[type_exp]
	for k, v in subd.items():
		fake_file(outpath + os.path.sep + wepvstring + os.path.sep + wepvstring + '_' + k, bytesize=v)
Example #18
    def _RedirectFromConfig(self, url):
        """ Look up redirects.json file in the directory hierarchy of |url|.
    Directory-level redirects occur first, followed by the specific file
    redirects. Returns the URL to the redirect, if any exist, or None.
    """
        dirname, filename = posixpath.split(url)
        redirected_dirname = self._RedirectDirectory(dirname)

        # Set up default return value.
        default_redirect = None
        if redirected_dirname != dirname:
            default_redirect = posixpath.normpath(Join(redirected_dirname, filename))

        try:
            rules = self._cache.GetFromFile(posixpath.normpath(Join(redirected_dirname, "redirects.json"))).Get()
        except FileNotFoundError:
            return default_redirect

        redirect = rules.get(filename)
        if redirect is None:
            return default_redirect
        if redirect.startswith("/") or urlsplit(redirect).scheme in ("http", "https"):
            return redirect

        return posixpath.normpath(Join(redirected_dirname, redirect))
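Example #19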
def dependsOnBoostSettings(env):
    env.Debug(". Adding compiler/linker settings for: Boost")
    buildRootPath = Dir("#").abspath
    # For BOOST Dynamic linking is used
    env.Append(CCFLAGS=["-DBOOST_LOG_DYN_LINK"])
    # TODO: next line needs to be updated after external has been moved to cpp
    env["BOOST_BASE_DIR"] = posixpath.normpath(buildRootPath + "/../external/boost_1_56_0")
    env["BOOST_LINK_PATH"] = posixpath.normpath(buildRootPath + "/../external/boost_1_56_0/lib/linux")
    env.Append(CPPPATH=[env["BOOST_BASE_DIR"]])
    libpath = env["BOOST_BASE_DIR"] + "/lib/linux"
    env.Append(LIBPATH=[libpath])
    libs = [
        "boost_chrono",
        "boost_date_time",
        "boost_filesystem",
        "boost_log",
        "boost_log_setup",
        "boost_program_options",
        "boost_regex",
        "boost_system",
        "boost_thread",
    ]
    env.Append(LIBS=libs)
    env.Append(LINKFLAGS="-Wl,-rpath,$BOOST_LINK_PATH")
    #
    env.Parent().ExternalLibrary("BOOST").AddDynamicLibraryData(libpath, libs, "so.1.56.0")
Example #20
File: WebCache.py Project: fukkun/Arelle
    def normalizeUrl(self, url, base=None):
        if url and not (url.startswith('http://') or os.path.isabs(url)):
            if base is not None and not base.startswith('http:') and '%' in url:
                url = unquote(url)
            if base:
                if base.startswith("http://"):
                    prot, sep, path = base.partition("://")
                    normedPath = prot + sep + posixpath.normpath(os.path.dirname(path) + "/" + url)
                else:
                    if '%' in base:
                        base = unquote(base)
                    normedPath = os.path.normpath(os.path.join(os.path.dirname(base), url))
            else:
                normedPath = url
            if normedPath.startswith("file://"):
                normedPath = normedPath[7:]
            elif normedPath.startswith("file:\\"):
                normedPath = normedPath[6:]

            # no base, not normalized, must be relative to current working directory
            if base is None and not os.path.isabs(url):
                normedPath = os.path.abspath(normedPath)
        else:
            normedPath = url

        if normedPath:
            if normedPath.startswith('http://'):
                pathpart = normedPath[7:].replace('\\', '/')
                endingSep = '/' if pathpart[-1] == '/' else ''  # normpath drops ending directory separator
                return "http://" + posixpath.normpath(pathpart) + endingSep
            normedPath = os.path.normpath(normedPath)
            if normedPath.startswith(self.cacheDir):
                normedPath = self.cacheFilepathToUrl(normedPath)
        return normedPath
Example #21
File: web_ui.py Project: blaxter/Bitten
def _replace(m):
    filepath = posixpath.normpath(m.group('path').replace('\\', '/'))
    if cache.get(filepath) is not True:
        parts = filepath.split('/')
        path = ''
        for part in parts:
            path = posixpath.join(path, part)
            if path not in cache:
                try:
                    full_path = posixpath.join(config.path, path)
                    full_path = posixpath.normpath(full_path)
                    if full_path.startswith(config.path + "/") or full_path == config.path:
                        repos.get_node(full_path, build.rev)
                        cache[path] = True
                    else:
                        cache[path] = False
                except TracError:
                    cache[path] = False
            if cache[path] is False:
                return m.group(0)
    link = href(config.path, filepath)
    if m.group('line'):
        link += '#L' + m.group('line')[1:]
    return Markup(tag.a(m.group(0), href=link))
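
The inner loop validates every prefix of the file path against the repository, building each prefix with posixpath.join; the prefixes it visits look like this:

import posixpath

path = ''
for part in 'src/lib/util.c'.split('/'):
    path = posixpath.join(path, part)
    print(path)
# src
# src/lib
# src/lib/util.c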
Example #22
def open_how_to():
    failed_list = []
    bash = ''
    if get_os_name() == "Windows":
        bash = 'bash '

    for lang in languages:     
        path = _base_path + lang['lang']
        how_to_list = glob.glob((path + '//' + "HowTo*"))
        for how_to in how_to_list:
            print "*" * 70
            build_sh = posixpath.normpath("%s/build.sh" % how_to)
            build_sh = build_sh.replace("\\", "/")
            run_sh = posixpath.normpath("%s/run.sh" % how_to)
            run_sh = run_sh.replace("\\", "/")
            print " - Building %s" % build_sh #os.path.basename(how_to)
            if subprocess.call("%s./%s" % (bash, build_sh)) == 0:
                print "     - Build succeeded"
                # print " - Running %s " % run_sh #os.path.basename(how_to)
                # print "     - ", "%s./%s" % (bash, run_sh)
                # subprocess.call("%s./%s" % (bash, run_sh))
            else:
                print "     - Build failed."
                print "     - Removing %s" %how_to
                failed_list.append((os.path.basename(how_to), lang['lang']))
                shutil.rmtree(how_to)
    print '*' * 70
    print ' - Failed builds:'
    for name, lang in failed_list:
        print '     - %s : %s' %(lang, name)
    print '*' * 70
Example #23
    def _converter(self, url):
        if url.startswith(('#', 'data:')):
            return url
        elif url.startswith(SCHEMES):
            return self.add_suffix(url)
        full_url = posixpath.normpath('/'.join([str(self.directory_name),
                                                url]))

        # custom
        partial_url = full_url[len(self.url) + 1:]
        if not os.path.isfile(
                os.path.join(
                    django_settings.STATIC_ROOT, partial_url)):
            dir_name = '/'.join((
                self.url,
                os.path.dirname(self.path)[len('/'.join([
                    django_settings.PIMPMYTHEME_FOLDER_NAME,
                    django_settings.SETTINGS_MODULE.split('.')[0]])) + 1:]))
            full_url = posixpath.normpath('/'.join([str(dir_name), url]))
        # end custom

        if self.has_scheme:
            full_url = "%s%s" % (self.protocol, full_url)
        full_url = self.add_suffix(full_url)
        return self.post_process_url(full_url)
Example #24
    def __call__(self, req):
        #if os.path.normpath("/" + req.path_info) == "/":
        #    return(base.ec2_md_print(base.VERSIONS + ["latest"]))

        path = req.path_info
        if path == "" or path[0] != "/":
            path = posixpath.normpath("/" + path)
        else:
            path = posixpath.normpath(path)

        path_tokens = path.split('/')[1:]

        if path_tokens[0] not in ("ip", "help"):
            if path_tokens[0] == "":
                # request for /
                #path_tokens = ["ip"]
                #TODO
                raise webob.exc.HTTPNotFound()
        elif path_tokens[0] == u'ip' and path_tokens[1]:
            data = self.check_ipv4(path_tokens[1])
        else:
            #TODO
            raise webob.exc.HTTPNotFound()

        return Response(json.dumps(data), content_type='application/json')
Example #25
    def locate_imported_file(self, source_dir, import_path):
        """ Locate the imported file in the source directory.
            Return the relative path to the imported file in posix format.

        :param source_dir: source directory
        :type source_dir: str
        :param import_path: path to the imported file
        :type import_path: str
        :returns: str

        """
        if not import_path.endswith("." + self.input_extension):
            import_path += "." + self.input_extension
        path = posixpath.normpath(posixpath.join(source_dir, import_path))

        try:
            self.get_full_source_path(path)
            return path
        except ValueError:
            pass

        filename = posixpath.basename(import_path)
        if filename[0] != "_":
            path = posixpath.normpath(posixpath.join(source_dir, posixpath.dirname(import_path), "_" + filename))

        try:
            self.get_full_source_path(path)
            return path
        except ValueError:
            pass

        raise exceptions.StaticCompilationError("Can't locate the imported file: {0}".format(import_path))
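
For a Sass/SCSS-style compiler this resolves both the literal name and the '_'-prefixed partial; a sketch of the two candidate paths it tries (names are illustrative):

import posixpath

source_dir, import_path = 'styles', 'mixins/colors.scss'
print(posixpath.normpath(posixpath.join(source_dir, import_path)))
# styles/mixins/colors.scss
print(posixpath.normpath(posixpath.join(source_dir, posixpath.dirname(import_path),
                                        '_' + posixpath.basename(import_path))))
# styles/mixins/_colors.scss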
Example #26
    def list(self):

        self._symbols_path = {}
        symbols = []
        if get_resource("symbols"):
            for root, _, files in os.walk(get_resource("symbols")):
                for filename in files:
                    if filename.startswith('.'):
                        continue
                    symbol_file = posixpath.normpath(os.path.relpath(os.path.join(root, filename), get_resource("symbols"))).replace('\\', '/')
                    theme = posixpath.dirname(symbol_file).replace('/', '-').capitalize()
                    if not theme:
                        continue
                    symbol_id = ':/symbols/' + symbol_file
                    symbols.append({'symbol_id': symbol_id,
                                    'filename': filename,
                                    'theme': theme,
                                    'builtin': True})
                    self._symbols_path[symbol_id] = os.path.join(root, filename)

        directory = self.symbols_path()
        if directory:
            for root, _, files in os.walk(directory):
                for filename in files:
                    if filename.startswith('.'):
                        continue
                    symbol_file = posixpath.normpath(os.path.relpath(os.path.join(root, filename), directory)).replace('\\', '/')
                    symbols.append({'symbol_id': symbol_file,
                                    'filename': filename,
                                    'builtin': False,
                                    'theme': "Custom symbols"})
                    self._symbols_path[symbol_file] = os.path.join(root, filename)

        symbols.sort(key=lambda x: x["filename"])
        return symbols
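
The symbol_file / theme derivation in miniature, with a hypothetical symbols root:

import os
import posixpath

root, filename = '/res/symbols/affinity/square', 'router.svg'
symbol_file = posixpath.normpath(
    os.path.relpath(os.path.join(root, filename), '/res/symbols')).replace('\\', '/')
print(symbol_file)  # affinity/square/router.svg
print(posixpath.dirname(symbol_file).replace('/', '-').capitalize())  # Affinity-square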
Example #27
    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        try:
            path = posixpath.normpath(urllib.unquote(path))
            words = path.split('/')
            words = filter(None, words)
            path = os.getcwd()
            for word in words:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in (os.curdir, os.pardir): continue
                path = os.path.join(path, word)
            return path
        except Exception, e:
            self.send_error(403, e)
            path = posixpath.normpath(urllib.unquote(path))
            words = path.split('/')
            words = filter(None, words)
            path = os.getcwd()
            for word in words:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in (os.curdir, os.pardir): continue
                path = os.path.join(path, word)
            return path.encode("utf-8")
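
The word-by-word rebuild is what keeps the translated path under the serving root: posixpath.normpath collapses what it can, and any '..' components that survive are then dropped. In miniature:

import posixpath

path = posixpath.normpath('static/../../etc/passwd')   # '../etc/passwd'
words = [w for w in path.split('/') if w and w not in ('.', '..')]
print(words)  # ['etc', 'passwd'] -- each piece is re-rooted under os.getcwd()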
Example #28
  def Extract(self):
    """Extract the tarfile to the current directory."""
    if self.verbose:
      sys.stdout.write('|' + ('-' * 48) + '|\n')
      sys.stdout.flush()
      dots_outputted = 0

    win32_symlinks = {}
    for m in self.tar:
      if self.verbose:
        cnt = self.read_file.tell()
        curdots = cnt * 50 / self.read_filesize
        if dots_outputted < curdots:
          for dot in xrange(dots_outputted, curdots):
            sys.stdout.write('.')
          sys.stdout.flush()
          dots_outputted = curdots

      # For hardlinks in Windows, we try to use mklink, and instead copy on
      # failure.
      if m.islnk() and sys.platform == 'win32':
        CreateWin32Link(m.name, m.linkname, self.verbose)
      # On Windows we treat symlinks as if they were hard links.
      # Proper Windows symlinks supported by everything can be made with
      # mklink, but only by an Administrator.  The older toolchains are
      # built with Cygwin, so they could use Cygwin-style symlinks; but
      # newer toolchains do not use Cygwin, and nothing else on the system
      # understands Cygwin-style symlinks, so avoid them.
      elif m.issym() and sys.platform == 'win32':
        # For a hard link, the link target (m.linkname) always appears
        # in the archive before the link itself (m.name), so the links
        # can just be made on the fly.  However, a symlink might well
        # appear in the archive before its target file, so there would
        # not yet be any file to hard-link to.  Hence, we have to collect
        # all the symlinks and create them in dependency order at the end.
        linkname = m.linkname
        if not posixpath.isabs(linkname):
          linkname = posixpath.join(posixpath.dirname(m.name), linkname)
        linkname = posixpath.normpath(linkname)
        win32_symlinks[posixpath.normpath(m.name)] = linkname
      # Otherwise, extract normally.
      else:
        self.tar.extract(m)

    win32_symlinks_left = win32_symlinks.items()
    while win32_symlinks_left:
      this_symlink = win32_symlinks_left.pop(0)
      name, linkname = this_symlink
      if linkname in win32_symlinks:
        # The target is itself a symlink not yet created.
        # Wait for it to come 'round on the guitar.
        win32_symlinks_left.append(this_symlink)
      else:
        del win32_symlinks[name]
        CreateWin32Link(name, linkname, self.verbose)

    if self.verbose:
      sys.stdout.write('\n')
      sys.stdout.flush()
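
Relative symlink targets are resolved against the link's own directory before being recorded; the normalization step in isolation, with illustrative entries:

import posixpath

name, linkname = 'toolchain/bin/gcc', '../libexec/gcc-bin'
print(posixpath.normpath(posixpath.join(posixpath.dirname(name), linkname)))
# toolchain/libexec/gcc-bin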
Example #29
def relpathto(thisdir, origin, dest):
    """
    Given two paths relative to a directory, work out a path from origin
    to destination.

    Assumes UNIX/URL type relative paths.
    If origin doesn't *end* with '/' we assume it's a file rather than a
    directory.

    If the same paths are passed in :
        if the path ends with ('/') then we return ''
        else we return the last part of the path (presumably a filename)

    If thisdir doesn't start with '/' then we add one
        (this makes the top level of thisdir our root directory)
    """
    orig_thisdir = thisdir
    if not thisdir.startswith("/"):
        thisdir = "/" + thisdir
    orig_abs = posixpath.normpath(posixpath.join(thisdir, origin))
    dest_abs = posixpath.normpath(posixpath.join(thisdir, dest))
    if origin.endswith("/") and not orig_abs.endswith("/"):
        orig_abs = orig_abs + "/"
    if dest.endswith("/") and not dest_abs.endswith("/"):
        dest_abs = dest_abs + "/"
    #    print orig_abs, dest_abs
    #
    # if the first item is a filename, we want to get rid of it
    orig_list = orig_abs.split("/")[:-1]
    dest_list = dest_abs.split("/")
    #    print orig_list, dest_list

    if orig_list[0] != dest_list[0]:
        # can't get here from there
        # XXXX raise exception?
        return dest
    #
    # find the location where the two paths start to differ.
    i = 0
    for start_seg, dest_seg in zip(orig_list, dest_list):
        if start_seg != dest_seg:
            break
        i += 1
    #
    # now i is the point where the two paths diverge;
    # need a certain number of "os.pardir"s to work up
    # from the origin to the point of divergence.
    segments = [".."] * (len(orig_list) - i)
    # need to add the diverging part of dest_list.
    segments += dest_list[i:]
    if len(segments) == 0:
        # if they happen to be identical paths
        # identical directories
        if dest.endswith("/"):
            return ""
        # just the filename - the last part of dest
        return dest_list[-1]
    else:
        return "/".join(segments)
Example #30
File: node.py Project: trmznt/cmsfix
    def generate_path(self):
        if not self.slug:
            raise RuntimeError('Node slug needs to be initialized first!')
        if self.parent.path == '/':
            self.path = posixpath.normpath('/%s' % self.slug)
        else:
            self.path = posixpath.normpath('%s/%s' % (self.parent.path, self.slug))
        return self.path
Example #31
def docname_join(basedocname: str, docname: str) -> str:
    return posixpath.normpath(
        posixpath.join('/' + basedocname, '..', docname))[1:]
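
For example, resolving a document reference relative to 'guide/install':

print(docname_join('guide/install', 'extras/tips'))  # guide/extras/tips
print(docname_join('guide/install', '/api/index'))   # api/index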
Example #32
    def is_internal_path(self, path):
        tmp = posixpath.normpath(
            posixpath.join(self.var["CMAKE_CURRENT_SOURCE_DIR"], path))
        return tmp.startswith(PathConstants.PACKAGE_SOURCE) or tmp.startswith(
            PathConstants.PACKAGE_BINARY)
Example #33
def docname_join(basedocname, docname):
    # type: (unicode, unicode) -> unicode
    return posixpath.normpath(
        posixpath.join('/' + basedocname, '..', docname))[1:]
Example #34
                'relative path "%s". You can use the explicit syntax '
                '--preload-file srcpath@dstpath to explicitly specify the target '
                'location the absolute source path should be directed to.' %
                (path, file_['dstpath']),
                file=sys.stderr)

for file_ in data_files:
    # name in the filesystem, native and emulated
    file_['dstpath'] = file_['dstpath'].replace(os.path.sep, '/')
    # If user has submitted a directory name as the destination but omitted
    # the destination filename, use the filename from source file
    if file_['dstpath'].endswith('/'):
        file_['dstpath'] = file_['dstpath'] + os.path.basename(
            file_['srcpath'])
    # make destination path always relative to the root
    file_['dstpath'] = posixpath.normpath(os.path.join('/', file_['dstpath']))
    if DEBUG:
        print('Packaging file "%s" to VFS in path "%s".' %
              (file_['srcpath'], file_['dstpath']),
              file=sys.stderr)

# Remove duplicates (can occur naively, for example preload dir/, preload dir/subdir/)
seen = {}


def was_seen(name):
    if seen.get(name):
        return True
    seen[name] = 1
    return False
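
was_seen marks a name on first sight, so duplicates from overlapping preload directories are skipped:

print(was_seen('/assets/logo.png'))  # False -- first occurrence is packaged
print(was_seen('/assets/logo.png'))  # True  -- duplicate is skipped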
Example #35
def plugin_static_serve(request, plugin, path, show_indexes=False):
    """
    Serve static files below a given point in the directory structure.

    To use, put a URL pattern such as::

        (r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'})

    in your URLconf. You must provide the ``document_root`` param. You may
    also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
    of the directory.  This index view will use the template hardcoded below,
    but if you'd like to override it, you can create a template called
    ``static/directory_index.html``.
    """

    import mimetypes
    import os
    import posixpath
    import stat
    from six.moves.urllib.request import unquote

    from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotModified
    from django.utils.http import http_date
    from django.views.static import was_modified_since, directory_index

    from django.conf import settings

    document_root = os.path.join(settings.PROJECT_ROOT, 'plugins', plugin,
                                 'media')

    # Clean up given path to only allow serving files below document_root.
    path = posixpath.normpath(unquote(path))
    path = path.lstrip('/')
    newpath = ''
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')

    if newpath and path != newpath:
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)

    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404(_("Directory indexes are not allowed here."))
    if not os.path.exists(fullpath):
        raise Http404('"%s" does not exist' % fullpath)

    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
        return HttpResponseNotModified(content_type=mimetype)
    contents = open(fullpath, 'rb').read()
    response = HttpResponse(contents, content_type=mimetype)
    response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
    response["Content-Length"] = len(contents)
    return response
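
The cleanup loop in isolation (Python 3 spelling of unquote), showing how traversal components are discarded before the path is joined under document_root:

import os
import posixpath
from urllib.parse import unquote

path = posixpath.normpath(unquote('../../etc/passwd')).lstrip('/')
newpath = ''
for part in path.split('/'):
    if not part:
        continue
    drive, part = os.path.splitdrive(part)
    head, part = os.path.split(part)
    if part in (os.curdir, os.pardir):
        continue
    newpath = os.path.join(newpath, part).replace('\\', '/')
print(newpath)  # etc/passwd -- joined under document_root, never above it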
Example #36
    def __init__(
        self,
        directories=None,
        module_directory=None,
        filesystem_checks=True,
        collection_size=-1,
        format_exceptions=False,
        error_handler=None,
        disable_unicode=False,
        bytestring_passthrough=False,
        output_encoding=None,
        encoding_errors="strict",
        cache_args=None,
        cache_impl="beaker",
        cache_enabled=True,
        cache_type=None,
        cache_dir=None,
        cache_url=None,
        modulename_callable=None,
        module_writer=None,
        default_filters=None,
        buffer_filters=(),
        strict_undefined=False,
        imports=None,
        future_imports=None,
        enable_loop=True,
        input_encoding=None,
        preprocessor=None,
        lexer_cls=None,
        include_error_handler=None,
    ):

        self.directories = [
            posixpath.normpath(d) for d in util.to_list(directories, ())
        ]
        self.module_directory = module_directory
        self.modulename_callable = modulename_callable
        self.filesystem_checks = filesystem_checks
        self.collection_size = collection_size

        if cache_args is None:
            cache_args = {}
        # transfer deprecated cache_* args
        if cache_dir:
            cache_args.setdefault("dir", cache_dir)
        if cache_url:
            cache_args.setdefault("url", cache_url)
        if cache_type:
            cache_args.setdefault("type", cache_type)

        self.template_args = {
            "format_exceptions": format_exceptions,
            "error_handler": error_handler,
            "include_error_handler": include_error_handler,
            "disable_unicode": disable_unicode,
            "bytestring_passthrough": bytestring_passthrough,
            "output_encoding": output_encoding,
            "cache_impl": cache_impl,
            "encoding_errors": encoding_errors,
            "input_encoding": input_encoding,
            "module_directory": module_directory,
            "module_writer": module_writer,
            "cache_args": cache_args,
            "cache_enabled": cache_enabled,
            "default_filters": default_filters,
            "buffer_filters": buffer_filters,
            "strict_undefined": strict_undefined,
            "imports": imports,
            "future_imports": future_imports,
            "enable_loop": enable_loop,
            "preprocessor": preprocessor,
            "lexer_cls": lexer_cls,
        }

        if collection_size == -1:
            self._collection = {}
            self._uri_cache = {}
        else:
            self._collection = util.LRUCache(collection_size)
            self._uri_cache = util.LRUCache(collection_size)
        self._mutex = threading.Lock()
Example #37
    def _parse_volume_spec_win32(
        self, volume_specs: List[str]
    ) -> Tuple[Iterable[str], Iterable[str], Dict[str, Dict[str, str]]]:
        named_volumes = []  # type: List[str]
        container_mount_paths = []  # type: List[str]
        host_spec = {}  # type: Dict[str, Dict[str, str]]

        for volume_spec in volume_specs:
            fields = volume_spec.split(":")

            if fields[-1] in ("ro", "rw"):
                mode = fields.pop()
            else:
                mode = "rw"

            if len(fields) == 3 and len(fields[0]) == 1:
                # C:\path1:/path2   <-- external and internal path
                external = ntpath.normpath(":".join(fields[0:2]))
                internal = posixpath.normpath(fields[2])
            elif len(fields) == 2:
                combined_path = ":".join(fields)
                (drive, path) = ntpath.splitdrive(combined_path)
                if drive:
                    # C:\path1          <-- assumed container path of /path1
                    external = ntpath.normpath(combined_path)

                    # C:\path1  --> /c/path1
                    path = str("/" + drive.lower().rstrip(":") + path).replace(
                        "\\", "/")
                    internal = posixpath.normpath(path)
                else:
                    # /path1:\path2     <-- external and internal path (relative to current drive)
                    # C:/path2          <-- valid named volume
                    external = ntpath.normpath(fields[0])
                    internal = posixpath.normpath(fields[1])
            elif len(fields) == 1:
                # \path1          <-- assumed container path of /path1 (relative to current drive)
                external = ntpath.normpath(fields[0])
                internal = external
            else:
                raise ValueError(
                    "Unable to parse volume specification '{}'".format(
                        volume_spec))

            container_mount_paths.append(internal)

            if external and self._is_named_volume_win32(external):
                named_volumes.append(external)
                if mode != "rw":
                    raise ValueError(
                        "Named volumes can only have 'rw' mode, provided '{}'".
                        format(mode))
            else:
                if not external:
                    # no external host path given, assume it is the same as
                    # the internal path
                    external = internal
                host_spec[external] = {
                    "bind": internal,
                    "mode": mode,
                }

        return named_volumes, container_mount_paths, host_spec
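
The drive-letter rewrite (C:\path -> /c/path) in isolation:

import ntpath
import posixpath

drive, path = ntpath.splitdrive('C:\\data\\logs')
internal = posixpath.normpath('/' + drive.lower().rstrip(':') + path.replace('\\', '/'))
print(internal)  # /c/data/logs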
Example #38
File: cli.py Project: tykune21/ampy
def put(local, remote):
    """Put a file or folder and its contents on the board.

    Put will upload a local file or folder  to the board.  If the file already
    exists on the board it will be overwritten with no warning!  You must pass
    at least one argument which is the path to the local file/folder to
    upload.  If the item to upload is a folder then it will be copied to the
    board recursively with its entire child structure.  You can pass a second
    optional argument which is the path and name of the file/folder to put to
    on the connected board.

    For example to upload a main.py from the current directory to the board's
    root run:

      ampy --port /board/serial/port put main.py

    Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py
    in the board's root run:

      ampy --port /board/serial/port put ./foo/board_boot.py boot.py

    To upload a local folder adafruit_library and all of its child files/folders
    as an item under the board's root run:

      ampy --port /board/serial/port put adafruit_library

    Or to put a local folder adafruit_library on the board under the path
    /lib/adafruit_library on the board run:

      ampy --port /board/serial/port put adafruit_library /lib/adafruit_library
    """
    # Use the local filename if no remote filename is provided.
    if remote is None:
        remote = os.path.basename(os.path.abspath(local))
    # Check if path is a folder and do recursive copy of everything inside it.
    # Otherwise it's a file and should simply be copied over.
    if os.path.isdir(local):
        # Directory copy, create the directory and walk all children to copy
        # over the files.
        board_files = files.Files(_board)
        for parent, child_dirs, child_files in os.walk(local):
            # Create board filesystem absolute path to parent directory.
            remote_parent = posixpath.normpath(posixpath.join(remote, os.path.relpath(parent, local)))
            try:
                # Create remote parent directory.
                board_files.mkdir(remote_parent)
                # Loop through all the files and put them on the board too.
                for filename in child_files:
                    with open(os.path.join(parent, filename), 'rb') as infile:
                        remote_filename = posixpath.join(remote_parent, filename)
                        board_files.put(remote_filename, infile.read())
            except files.DirectoryExistsError:
                # Ignore errors for directories that already exist.
                pass

    else:
        # File copy, open the file and copy its contents to the board.
        # Put the file on the board.
        with open(local, 'rb') as infile:
            board_files = files.Files(_board)
            board_files.put(remote, infile.read())
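
How the board-side directory is derived for a nested local folder (a sketch assuming a POSIX host, with illustrative paths):

import os
import posixpath

remote = 'adafruit_library'
parent = os.path.join('adafruit_library', 'fonts')  # as yielded by os.walk
remote_parent = posixpath.normpath(posixpath.join(remote, os.path.relpath(parent, 'adafruit_library')))
print(remote_parent)  # adafruit_library/fonts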
Example #39
File: APSmisc.py Project: symek/haps
def absoluteObjectPath(obj_rel_to, now, path):
    if posixpath.isabs(path):
        return path
    rel_to = obj_rel_to.getDefaultedString("object:name", now, [''])[0]
    return posixpath.normpath(posixpath.join(rel_to, path))
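
So a relative object path is resolved against the owning object's name, e.g.:

import posixpath

print(posixpath.normpath(posixpath.join('/obj/parent', '../sibling')))  # /obj/sibling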
Example #40
File: path.py Project: Floflis/gecko-b2g
def normpath(path):
    return posixpath.normpath(normsep(path))
Example #41
    def __init__(self,
                 directories=None,
                 module_directory=None,
                 filesystem_checks=True,
                 collection_size=-1,
                 format_exceptions=False,
                 error_handler=None,
                 disable_unicode=False,
                 bytestring_passthrough=False,
                 output_encoding=None,
                 encoding_errors='strict',
                 cache_args=None,
                 cache_impl='beaker',
                 cache_enabled=True,
                 cache_type=None,
                 cache_dir=None,
                 cache_url=None,
                 modulename_callable=None,
                 module_writer=None,
                 default_filters=None,
                 buffer_filters=(),
                 strict_undefined=False,
                 imports=None,
                 future_imports=None,
                 enable_loop=True,
                 input_encoding=None,
                 preprocessor=None,
                 lexer_cls=None,
                 include_error_handler=None):

        self.directories = [
            posixpath.normpath(d) for d in util.to_list(directories, ())
        ]
        self.module_directory = module_directory
        self.modulename_callable = modulename_callable
        self.filesystem_checks = filesystem_checks
        self.collection_size = collection_size

        if cache_args is None:
            cache_args = {}
        # transfer deprecated cache_* args
        if cache_dir:
            cache_args.setdefault('dir', cache_dir)
        if cache_url:
            cache_args.setdefault('url', cache_url)
        if cache_type:
            cache_args.setdefault('type', cache_type)

        self.template_args = {
            'format_exceptions': format_exceptions,
            'error_handler': error_handler,
            'include_error_handler': include_error_handler,
            'disable_unicode': disable_unicode,
            'bytestring_passthrough': bytestring_passthrough,
            'output_encoding': output_encoding,
            'cache_impl': cache_impl,
            'encoding_errors': encoding_errors,
            'input_encoding': input_encoding,
            'module_directory': module_directory,
            'module_writer': module_writer,
            'cache_args': cache_args,
            'cache_enabled': cache_enabled,
            'default_filters': default_filters,
            'buffer_filters': buffer_filters,
            'strict_undefined': strict_undefined,
            'imports': imports,
            'future_imports': future_imports,
            'enable_loop': enable_loop,
            'preprocessor': preprocessor,
            'lexer_cls': lexer_cls
        }

        if collection_size == -1:
            self._collection = {}
            self._uri_cache = {}
        else:
            self._collection = util.LRUCache(collection_size)
            self._uri_cache = util.LRUCache(collection_size)
        self._mutex = threading.Lock()
Example #42
def _to_git_path(path):
    path = posixpath.normpath(urlparse.urlsplit(path).path)
    return path[1:]
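
A quick check of the split-and-normalize behaviour (Python 3 spelling of urlsplit):

import posixpath
from urllib.parse import urlsplit

print(posixpath.normpath(urlsplit('https://host/a//b/../c').path)[1:])  # a/c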
Example #43
    def _rebase(self, url):
        if "#" in url:
            url, hashid = url.rsplit("#", 1)
            hashid = "#" + hashid
        else:
            hashid = ""

        if "?" in url:
            url, _ = url.rsplit("?", 1)

        rebased = None
        if url.startswith("."):
            rebased = posixpath.join(self.base, url)
            rebased = posixpath.normpath(rebased)
        else:
            rebased = url.strip("/")

        path = None
        if '/' in self.name:  # try find file using relative url in self.name
            path = find_file(
                os.path.join(self.name[:self.name.rindex('/')], rebased))
            if path:
                rebased = os.path.join(self.name[:self.name.rindex('/')],
                                       rebased)

        if not path:  # try finding file based on GLOBAL_MEDIA_DIRS
            path = find_file(rebased)

        if not path:
            raise Exception(
                "Unable to find url `%s` from file %s. File does not exists: %s"
                % (url, self.name, rebased))

        # generating data for images doesn't work for scss
        if getattr(settings, 'GENERATE_DATA_URIS',
                   False) and self.name.endswith('.css'):
            if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
                    not IGNORE_PATTERN.match(rebased):
                data = b64encode(open(path, 'rb').read())
                mime = guess_type(path)[0] or 'application/octet-stream'

                return 'data:%s;base64,%s' % (mime, data)
        elif getattr(settings, 'GENERATE_DATA_URIS',
                     False) and self.name.endswith('.scss') and False:
            # note: the trailing `and False` deliberately disables this branch;
            # it is kept only for reference
            if os.path.getsize(
                    path
            ) <= MAX_DATA_URI_FILE_SIZE and not IGNORE_PATTERN.match(rebased):
                #data = b64encode(open(path, 'rb').read())
                #mime = guess_type(path)[0] or 'application/octet-stream'
                return 'inline-image("%s")' % (url)

        if appsettings.MEDIA_DEV_MODE:
            prefix = appsettings.DEV_MEDIA_URL
            version = os.path.getmtime(path)
            rebased += "?v=%s" % version

        else:
            prefix = appsettings.PRODUCTION_MEDIA_URL
            with open(path, 'rb') as sf:  # hash raw bytes
                version = sha1(sf.read()).hexdigest()

            # note: this split/rejoin is a no-op as written; the sha1 version
            # computed above is never inserted into the filename
            rebased_prefix, rebased_extension = rebased.rsplit(".", 1)
            rebased = "%s.%s" % (rebased_prefix, rebased_extension)

        rebased = posixpath.join(prefix, rebased)
        return "/" + rebased.strip("/") + hashid
Example #44
File: nav.py Project: xeechou/mkblogs
 def make_absolute(self, path):
     """
     Given a relative file path return it as a POSIX-style
     absolute filepath, given the context of the current page.
     """
     return posixpath.normpath(posixpath.join(self.base_path, path))
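
A minimal usage sketch, assuming base_path is the POSIX-style directory of the current page (the names below are illustrative):

import posixpath

base_path = 'blog/2015'  # hypothetical current-page directory
rel = '../img/logo.png'
print(posixpath.normpath(posixpath.join(base_path, rel)))  # -> 'blog/img/logo.png'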
Example #45
 def get_absolute_path(cls, root, path):
     from django.contrib.staticfiles import finders
     normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
     absolute_path = finders.find(normalized_path)
     return absolute_path
Example #46
def docname_join(basedocname, docname):
    # type: (str, str) -> str
    return posixpath.normpath(posixpath.join('/' + basedocname, '..',
                                             docname))[1:]
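
A worked example of the resolution: the relative docname is joined against the directory of basedocname, then re-rooted by stripping the leading '/':

import posixpath

def docname_join(basedocname, docname):
    return posixpath.normpath(posixpath.join('/' + basedocname, '..', docname))[1:]

print(docname_join('guide/install', 'usage'))         # -> 'guide/usage'
print(docname_join('guide/install', '../api/index'))  # -> 'api/index'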
Example #47
 def normpath(x):
     # note: `x` is unused; this closure normalizes the element's href
     # against `delta`, both captured from the enclosing scope
     return posixpath.normpath(delta + elem.get('href'))
Example #48
 def joinpath(self, *path):
     return posixpath.normpath(posixpath.join(*path))
Example #49
    def _follow_link(self, url, link):
        # Discard over-long links (2000 chars; RFC 7230 itself only asks that
        # implementations support request lines of at least 8000 octets)
        if len(link) > 2000:
            return None

        # Remove anchor
        link = re.sub(r'#[^#]*$', '', link)

        # Skip prefix
        if re.search(self.prefix_filter, link):
            return None

        # Filter url
        for f in self.url_filters:
            if re.search(f, link):
                return None

        rx = re.match(r'(https?://)([^/:]+)(:[0-9]+)?([^\?]*)(\?.*)?', url)
        url_proto = rx.group(1)
        url_host = rx.group(2)
        url_port = rx.group(3) if rx.group(3) else ''
        url_path = rx.group(4) if len(rx.group(4)) > 0 else '/'
        url_dir_path = dirname(url_path)

        rx = re.match(r'((https?://)([^/:]+)(:[0-9]+)?)?([^\?]*)(\?.*)?', link)
        link_full_url = rx.group(1) is not None
        link_proto = rx.group(2) if rx.group(2) else url_proto
        link_host = rx.group(3) if rx.group(3) else url_host
        link_port = rx.group(4) if rx.group(4) else url_port
        link_path = rx.group(5) if rx.group(5) else url_path
        link_query = quote(rx.group(6), '?=&%/') if rx.group(6) else ''

        if not link_full_url and not link.startswith('/'):
            link_path = normpath(join(url_dir_path, link_path))
        link_dir_path = dirname(link_path)

        link_url = link_proto + link_host + link_port + link_path + link_query
        if self.follow_mode == self.F_ANY:
            return link_url
        elif self.follow_mode == self.F_TLD:
            dom = self._url_domain(link_host)
            if dom == self.currdomain:
                return link_url
            elif dom.split(".")[-1] == self.TLdomain:
                self.targets_lock.acquire()
                if dom not in self.outerdomaintargets:
                    self.outerdomaintargets[dom] = set()
                self.outerdomaintargets[dom].add(link_url)
                self.targets_lock.release()
                sys.stderr.write("'" + link +
                                 "' stored in the list of domains\n")
                return None
            else:
                sys.stderr.write("'" + link +
                                 "' discarded: not in the same TLD\n")
                return None
        elif self.follow_mode == self.F_SAME_DOMAIN:
            return link_url if self._url_domain(self.host) == \
                    self._url_domain(link_host) else None
        elif self.follow_mode == self.F_SAME_HOST:
            return link_url if self.host == link_host else None
        elif self.follow_mode == self.F_SAME_PATH:
            if self.host == link_host and \
                    link_dir_path.startswith(self.dir_path):
                return link_url
            else:
                return None
Example #50
 def _stored_name(self, name, hashed_files):
     # Normalize the path to avoid multiple names for the same file like
     # ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same
     # path.
     name = posixpath.normpath(name)
Example #51
 def normpath(self, path):
     """Normalize path, eliminating double slashes, etc"""
     return posixpath.normpath(path)
Example #52
    def _http_request(self, url, method, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
        as setting headers and error handling.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
        kwargs['headers'].setdefault('User-Agent', USER_AGENT)
        if self.auth_token:
            kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)

        self.log_curl_request(method, url, kwargs)
        conn = self.get_connection()

        # Note(flaper87): Before letting headers / url fly,
        # they should be encoded, otherwise httplib will
        # complain. If we decide to rely on python-requests,
        # this won't be necessary anymore.
        kwargs['headers'] = self.encode_headers(kwargs['headers'])

        try:
            if self.endpoint_path:
                url = '%s/%s' % (self.endpoint_path, url)
            conn_url = posixpath.normpath(url)
            # Note(flaper87): Ditto, headers / url
            # encoding to make httplib happy.
            conn_url = strutils.safe_encode(conn_url)
            if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
                conn.putrequest(method, conn_url)
                for header, value in kwargs['headers'].items():
                    conn.putheader(header, value)
                conn.endheaders()
                chunk = kwargs['body'].read(CHUNKSIZE)
                # Chunk it, baby...
                while chunk:
                    conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                    chunk = kwargs['body'].read(CHUNKSIZE)
                conn.send('0\r\n\r\n')
            else:
                conn.request(method, conn_url, **kwargs)
            resp = conn.getresponse()
        except socket.gaierror as e:
            message = "Error finding address for %s: %s" % (
                self.endpoint_hostname, e)
            raise exc.InvalidEndpoint(message=message)
        except (socket.error, socket.timeout) as e:
            endpoint = self.endpoint
            message = "Error communicating with %(endpoint)s %(e)s" % locals()
            raise exc.CommunicationError(message=message)

        body_iter = ResponseBodyIterator(resp)
        body_str = None  # defined up-front so the error path below cannot hit a NameError

        # Read body into string if it isn't obviously image data
        if resp.getheader('content-type', None) != 'application/octet-stream':
            body_str = ''.join([chunk for chunk in body_iter])
            self.log_http_response(resp, body_str)
            body_iter = StringIO.StringIO(body_str)
        else:
            self.log_http_response(resp)

        if 400 <= resp.status < 600:
            LOG.error("Request returned failure status.")
            raise exc.from_response(resp, body_str)
        elif resp.status in (301, 302, 305):
            # Redirected. Reissue the request to the new location.
            # httplib responses aren't subscriptable; read the Location header
            return self._http_request(resp.getheader('location'), method,
                                      **kwargs)
        elif resp.status == 300:
            raise exc.from_response(resp)

        return resp, body_iter
Example #53
def _parsed_url(url):
    parsed_url = urlparse(url)
    prefix = parsed_url.scheme + '://' + parsed_url.netloc
    base_path = posixpath.normpath(parsed_url.path + '/..')
    return urljoin(prefix, base_path)
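
A short trace of what this computes, shown here with Python 3's urllib.parse (the original presumably imports urlparse and urljoin from the version-appropriate module):

from posixpath import normpath
from urllib.parse import urljoin, urlparse

url = 'https://example.com/pkgs/simple/index.html'
p = urlparse(url)
base = normpath(p.path + '/..')  # '/pkgs/simple/index.html/..' -> '/pkgs/simple'
print(urljoin(p.scheme + '://' + p.netloc, base))  # -> 'https://example.com/pkgs/simple'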
Example #54
 def realpath(path):
     return posixpath.normpath(posixpath.join(mock_os.getcwd(), path))
Example #55
 def normpath(path):
     res = posixpath.normpath(path)
     # Python normpath() doesn't eliminate leading double slashes
     if res.startswith('//'):
         return res[1:]
     return res
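
A quick demonstration of the quirk this wrapper compensates for: POSIX reserves paths that begin with exactly two slashes, so posixpath.normpath() preserves them:

import posixpath

print(posixpath.normpath('//etc/passwd'))   # -> '//etc/passwd' (kept)
print(posixpath.normpath('///etc/passwd'))  # -> '/etc/passwd' (collapsed)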
Example #56
def main():
    data_files = []
    export_name = 'Module'
    leading = ''
    has_preloaded = False
    plugins = []
    jsoutput = None
    from_emcc = False
    force = True
    # If set to True, IndexedDB (IDBFS in library_idbfs.js) is used to locally
    # cache VFS XHR so that subsequent page loads can read the data from the
    # offline cache instead.
    use_preload_cache = False
    indexeddb_name = 'EM_PRELOAD_CACHE'
    # If set to True, the package metadata is stored separately from the js-output
    # file, which keeps the js-output file immutable to package content changes.
    # If set to False, the package metadata is stored inside the js-output file,
    # which causes the js-output file to change on each invocation of this packager tool.
    separate_metadata = False
    lz4 = False
    use_preload_plugins = False

    for arg in sys.argv[2:]:
        if arg == '--preload':
            has_preloaded = True
            leading = 'preload'
        elif arg == '--embed':
            leading = 'embed'
        elif arg == '--exclude':
            leading = 'exclude'
        elif arg == '--no-force':
            force = False
            leading = ''
        elif arg == '--use-preload-cache':
            use_preload_cache = True
            leading = ''
        elif arg.startswith('--indexedDB-name'):
            indexeddb_name = arg.split('=', 1)[1] if '=' in arg else None
            leading = ''
        elif arg == '--no-heap-copy':
            print(
                'ignoring legacy flag --no-heap-copy (that is the only mode supported now)'
            )
            leading = ''
        elif arg == '--separate-metadata':
            separate_metadata = True
            leading = ''
        elif arg == '--lz4':
            lz4 = True
            leading = ''
        elif arg == '--use-preload-plugins':
            use_preload_plugins = True
            leading = ''
        elif arg.startswith('--js-output'):
            jsoutput = arg.split('=', 1)[1] if '=' in arg else None
            leading = ''
        elif arg.startswith('--export-name'):
            if '=' in arg:
                export_name = arg.split('=', 1)[1]
            leading = ''
        elif arg.startswith('--from-emcc'):
            from_emcc = True
            leading = ''
        elif arg.startswith('--plugin'):
            with open(arg.split('=', 1)[1]) as f:
                plugin = f.read()
            eval(plugin)  # should append itself to plugins
            leading = ''
        elif leading == 'preload' or leading == 'embed':
            mode = leading
            # position of @ if we're doing 'src@dst'. '__' is used to keep the index
            # same with the original if they escaped with '@@'.
            at_position = arg.replace('@@', '__').find('@')
            # '@@' in input string means there is an actual @ character, a single '@'
            # means the 'src@dst' notation.
            uses_at_notation = (at_position != -1)
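            # Illustrative traces (hypothetical arguments):
            #   'assets@@v2/logo.png'        -> no real '@': src == dst == 'assets@v2/logo.png'
            #   'src/logo.png@img/logo.png'  -> srcpath 'src/logo.png', dstpath 'img/logo.png'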

            if uses_at_notation:
                srcpath = arg[0:at_position].replace('@@',
                                                     '@')  # split around the @
                dstpath = arg[at_position + 1:].replace('@@', '@')
            else:
                # Use source path as destination path.
                srcpath = dstpath = arg.replace('@@', '@')
            if os.path.isfile(srcpath) or os.path.isdir(srcpath):
                data_files.append({
                    'srcpath': srcpath,
                    'dstpath': dstpath,
                    'mode': mode,
                    'explicit_dst_path': uses_at_notation
                })
            else:
                print('error: ' + arg + ' does not exist', file=sys.stderr)
                return 1
        elif leading == 'exclude':
            excluded_patterns.append(arg)
        else:
            print('Unknown parameter:', arg, file=sys.stderr)
            return 1

    if (not force) and not data_files:
        has_preloaded = False
    if not has_preloaded or jsoutput is None:
        assert not separate_metadata, (
            'cannot separate-metadata without both --preloaded files '
            'and a specified --js-output')

    if not from_emcc:
        print(
            'Remember to build the main file with  -s FORCE_FILESYSTEM=1  '
            'so that it includes support for loading this file package',
            file=sys.stderr)

    if jsoutput and os.path.abspath(jsoutput) == os.path.abspath(data_target):
        print('error: TARGET should not be the same value as --js-output',
              file=sys.stderr)
        return 1

    ret = ''
    # emcc will add this to the output itself, so it is only needed for
    # standalone calls
    if not from_emcc:
        ret = '''
  var Module = typeof %(EXPORT_NAME)s !== 'undefined' ? %(EXPORT_NAME)s : {};
  ''' % {
            "EXPORT_NAME": export_name
        }

    ret += '''
  if (!Module.expectedDataFileDownloads) {
    Module.expectedDataFileDownloads = 0;
  }
  Module.expectedDataFileDownloads++;
  (function() {
   var loadPackage = function(metadata) {
  '''

    code = '''
      function assert(check, msg) {
        if (!check) throw msg + new Error().stack;
      }
  '''

    new_data_files = []  # entries that pass the ignore filter
    for file_ in data_files:
        if not should_ignore(file_['srcpath']):
            if os.path.isdir(file_['srcpath']):
                add(file_['mode'], file_['srcpath'], file_['dstpath'])
            else:
                new_data_files.append(file_)
    data_files = [
        file_ for file_ in new_data_files
        if not os.path.isdir(file_['srcpath'])
    ]
    if len(data_files) == 0:
        print('Nothing to do!', file=sys.stderr)
        sys.exit(1)

    # Absolutize paths, and check that they make sense
    # os.getcwd() always returns the hard path with any symbolic links resolved,
    # even if we cd'd into a symbolic link.
    curr_abspath = os.path.abspath(os.getcwd())

    for file_ in data_files:
        if not file_['explicit_dst_path']:
            # This file was not defined with src@dst, so we inferred the destination
            # from the source. In that case, we require that the destination not be
            # under the current location
            path = file_['dstpath']
            # Use os.path.realpath to resolve any symbolic links to hard paths,
            # to match the structure in curr_abspath.
            abspath = os.path.realpath(os.path.abspath(path))
            if DEBUG:
                print(path, abspath, curr_abspath, file=sys.stderr)
            if not abspath.startswith(curr_abspath):
                print(
                    'Error: Embedding "%s" which is below the current directory '
                    '"%s". This is invalid since the current directory becomes the '
                    'root that the generated code will see' %
                    (path, curr_abspath),
                    file=sys.stderr)
                sys.exit(1)
            file_['dstpath'] = abspath[len(curr_abspath) + 1:]
            if os.path.isabs(path):
                print(
                    'Warning: Embedding an absolute file/directory name "%s" to the '
                    'virtual filesystem. The file will be made available in the '
                    'relative path "%s". You can use the explicit syntax '
                    '--preload-file srcpath@dstpath to explicitly specify the target '
                    'location the absolute source path should be directed to.'
                    % (path, file_['dstpath']),
                    file=sys.stderr)

    for file_ in data_files:
        # name in the filesystem, native and emulated
        file_['dstpath'] = file_['dstpath'].replace(os.path.sep, '/')
        # If user has submitted a directory name as the destination but omitted
        # the destination filename, use the filename from source file
        if file_['dstpath'].endswith('/'):
            file_['dstpath'] = file_['dstpath'] + os.path.basename(
                file_['srcpath'])
        # make destination path always relative to the root
        file_['dstpath'] = posixpath.normpath(
            os.path.join('/', file_['dstpath']))
        if DEBUG:
            print('Packaging file "%s" to VFS in path "%s".' %
                  (file_['srcpath'], file_['dstpath']),
                  file=sys.stderr)

    # Remove duplicates (can occur naively, for example preload dir/, preload dir/subdir/)
    seen = {}

    def was_seen(name):
        if seen.get(name):
            return True
        seen[name] = 1
        return False

    data_files = [
        file_ for file_ in data_files if not was_seen(file_['dstpath'])
    ]

    if AV_WORKAROUND:
        random.shuffle(data_files)

    # Apply plugins
    for file_ in data_files:
        for plugin in plugins:
            plugin(file_)

    metadata = {'files': []}

    # Set up folders
    partial_dirs = []
    for file_ in data_files:
        dirname = os.path.dirname(file_['dstpath'])
        dirname = dirname.lstrip('/')  # absolute paths start with '/'; remove it
        if dirname != '':
            parts = dirname.split('/')
            for i in range(len(parts)):
                partial = '/'.join(parts[:i + 1])
                if partial not in partial_dirs:
                    code += (
                        '''Module['FS_createPath']('/%s', '%s', true, true);\n'''
                        % ('/'.join(parts[:i]), parts[i]))
                    partial_dirs.append(partial)
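
    # For a dstpath of '/a/b/c.txt' the loop above emits:
    #   Module['FS_createPath']('/', 'a', true, true);
    #   Module['FS_createPath']('/a', 'b', true, true);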

    if has_preloaded:
        # Bundle all datafiles into one archive. Avoids doing lots of simultaneous
        # XHRs which has overhead.
        start = 0
        with open(data_target, 'wb') as data:
            for file_ in data_files:
                file_['data_start'] = start
                with open(file_['srcpath'], 'rb') as f:
                    curr = f.read()
                file_['data_end'] = start + len(curr)
                if AV_WORKAROUND:
                    curr += b'\x00'  # curr holds bytes (file opened 'rb'); pad with a NUL byte
                start += len(curr)
                data.write(curr)

        # TODO: sha256sum on data_target
        if start > 256 * 1024 * 1024:
            print(
                'warning: file packager is creating an asset bundle of %d MB. '
                'this is very large, and browsers might have trouble loading it. '
                'see https://hacks.mozilla.org/2015/02/synchronous-execution-and-filesystem-access-in-emscripten/'
                % (start / (1024 * 1024)),
                file=sys.stderr)

        create_preloaded = '''
          Module['FS_createPreloadedFile'](this.name, null, byteArray, true, true, function() {
            Module['removeRunDependency']('fp ' + that.name);
          }, function() {
            if (that.audio) {
              Module['removeRunDependency']('fp ' + that.name); // workaround for chromium bug 124926 (still no audio with this, but at least we don't hang)
            } else {
              err('Preloading file ' + that.name + ' failed');
            }
          }, false, true); // canOwn this data in the filesystem, it is a slide into the heap that will never change
  '''
        create_data = '''
          Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); // canOwn this data in the filesystem, it is a slide into the heap that will never change
          Module['removeRunDependency']('fp ' + that.name);
  '''

        # Data requests - for getting a block of data out of the big archive - have
        # a similar API to XHRs
        code += '''
      /** @constructor */
      function DataRequest(start, end, audio) {
        this.start = start;
        this.end = end;
        this.audio = audio;
      }
      DataRequest.prototype = {
        requests: {},
        open: function(mode, name) {
          this.name = name;
          this.requests[name] = this;
          Module['addRunDependency']('fp ' + this.name);
        },
        send: function() {},
        onload: function() {
          var byteArray = this.byteArray.subarray(this.start, this.end);
          this.finish(byteArray);
        },
        finish: function(byteArray) {
          var that = this;
  %s
          this.requests[this.name] = null;
        }
      };
  %s
    ''' % (create_preloaded if use_preload_plugins else create_data, '''
          var files = metadata['files'];
          for (var i = 0; i < files.length; ++i) {
            new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio']).open('GET', files[i]['filename']);
          }
  ''' if not lz4 else '')

    counter = 0
    for file_ in data_files:
        filename = file_['dstpath']
        dirname = os.path.dirname(filename)
        basename = os.path.basename(filename)
        if file_['mode'] == 'embed':
            # Embed
            data = list(bytearray(open(file_['srcpath'], 'rb').read()))
            code += '''var fileData%d = [];\n''' % counter
            if data:
                parts = []
                chunk_size = 10240
                start = 0
                while start < len(data):
                    parts.append(
                        '''fileData%d.push.apply(fileData%d, %s);\n''' %
                        (counter, counter, str(
                            data[start:start + chunk_size])))
                    start += chunk_size
                code += ''.join(parts)
            code += (
                '''Module['FS_createDataFile']('%s', '%s', fileData%d, true, true, false);\n'''
                % (dirname, basename, counter))
            counter += 1
        elif file_['mode'] == 'preload':
            # Preload
            counter += 1
            metadata['files'].append({
                'filename': file_['dstpath'],
                'start': file_['data_start'],
                'end': file_['data_end'],
                'audio': 1 if filename[-4:] in AUDIO_SUFFIXES else 0,
            })
        else:
            assert 0

    if has_preloaded:
        if not lz4:
            # Get the big archive and split it up
            use_data = '''
          // Reuse the bytearray from the XHR as the source for file reads.
          DataRequest.prototype.byteArray = byteArray;
    '''
            use_data += '''
            var files = metadata['files'];
            for (var i = 0; i < files.length; ++i) {
              DataRequest.prototype.requests[files[i].filename].onload();
            }
      '''
            use_data += (
                "          Module['removeRunDependency']('datafile_%s');\n" %
                shared.JS.escape_for_js_string(data_target))

        else:
            # LZ4FS usage
            temp = data_target + '.orig'
            shutil.move(data_target, temp)
            meta = shared.run_js_tool(shared.path_from_root(
                'tools', 'lz4-compress.js'), [
                    shared.path_from_root('src', 'mini-lz4.js'), temp,
                    data_target
                ],
                                      stdout=PIPE)
            os.unlink(temp)
            use_data = '''
            var compressedData = %s;
            compressedData['data'] = byteArray;
            assert(typeof LZ4 === 'object', 'LZ4 not present - was your app build with  -s LZ4=1  ?');
            LZ4.loadPackage({ 'metadata': metadata, 'compressedData': compressedData });
            Module['removeRunDependency']('datafile_%s');
      ''' % (meta, shared.JS.escape_for_js_string(data_target))

        package_uuid = uuid.uuid4()
        package_name = data_target
        remote_package_size = os.path.getsize(package_name)
        remote_package_name = os.path.basename(package_name)
        ret += r'''
      var PACKAGE_PATH;
      if (typeof window === 'object') {
        PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.toString().substring(0, window.location.pathname.toString().lastIndexOf('/')) + '/');
      } else if (typeof location !== 'undefined') {
        // worker
        PACKAGE_PATH = encodeURIComponent(location.pathname.toString().substring(0, location.pathname.toString().lastIndexOf('/')) + '/');
      } else {
        throw 'using preloaded data can only be done on a web page or in a web worker';
      }
      var PACKAGE_NAME = '%s';
      var REMOTE_PACKAGE_BASE = '%s';
      if (typeof Module['locateFilePackage'] === 'function' && !Module['locateFile']) {
        Module['locateFile'] = Module['locateFilePackage'];
        err('warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)');
      }
      var REMOTE_PACKAGE_NAME = Module['locateFile'] ? Module['locateFile'](REMOTE_PACKAGE_BASE, '') : REMOTE_PACKAGE_BASE;
    ''' % (shared.JS.escape_for_js_string(data_target),
           shared.JS.escape_for_js_string(remote_package_name))
        metadata['remote_package_size'] = remote_package_size
        metadata['package_uuid'] = str(package_uuid)
        ret += '''
      var REMOTE_PACKAGE_SIZE = metadata['remote_package_size'];
      var PACKAGE_UUID = metadata['package_uuid'];
    '''

        if use_preload_cache:
            code += r'''
        var indexedDB;
        if (typeof window === 'object') {
          indexedDB = window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
        } else if (typeof location !== 'undefined') {
          // worker
          indexedDB = self.indexedDB;
        } else {
          throw 'using IndexedDB to cache data can only be done on a web page or in a web worker';
        }
        var IDB_RO = "readonly";
        var IDB_RW = "readwrite";
        var DB_NAME = "''' + indexeddb_name + '''";
        var DB_VERSION = 1;
        var METADATA_STORE_NAME = 'METADATA';
        var PACKAGE_STORE_NAME = 'PACKAGES';
        function openDatabase(callback, errback) {
          try {
            var openRequest = indexedDB.open(DB_NAME, DB_VERSION);
          } catch (e) {
            return errback(e);
          }
          openRequest.onupgradeneeded = function(event) {
            var db = event.target.result;

            if(db.objectStoreNames.contains(PACKAGE_STORE_NAME)) {
              db.deleteObjectStore(PACKAGE_STORE_NAME);
            }
            var packages = db.createObjectStore(PACKAGE_STORE_NAME);

            if(db.objectStoreNames.contains(METADATA_STORE_NAME)) {
              db.deleteObjectStore(METADATA_STORE_NAME);
            }
            var metadata = db.createObjectStore(METADATA_STORE_NAME);
          };
          openRequest.onsuccess = function(event) {
            var db = event.target.result;
            callback(db);
          };
          openRequest.onerror = function(error) {
            errback(error);
          };
        };

        // This is needed as chromium has a limit on per-entry files in IndexedDB
        // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177
        // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
        // We set the chunk size to 64MB to stay well-below the limit
        var CHUNK_SIZE = 64 * 1024 * 1024;
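        // Illustrative: a 200 MB package is stored as ceil(200/64) = 4 chunks
        // (64 + 64 + 64 + 8 MB) under keys 'package/<name>/0' .. 'package/<name>/3'.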

        function cacheRemotePackage(
          db,
          packageName,
          packageData,
          packageMeta,
          callback,
          errback
        ) {
          var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW);
          var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME);
          var chunkSliceStart = 0;
          var nextChunkSliceStart = 0;
          var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE);
          var finishedChunks = 0;
          for (var chunkId = 0; chunkId < chunkCount; chunkId++) {
            nextChunkSliceStart += CHUNK_SIZE;
            var putPackageRequest = packages.put(
              packageData.slice(chunkSliceStart, nextChunkSliceStart),
              'package/' + packageName + '/' + chunkId
            );
            chunkSliceStart = nextChunkSliceStart;
            putPackageRequest.onsuccess = function(event) {
              finishedChunks++;
              if (finishedChunks == chunkCount) {
                var transaction_metadata = db.transaction(
                  [METADATA_STORE_NAME],
                  IDB_RW
                );
                var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME);
                var putMetadataRequest = metadata.put(
                  {
                    'uuid': packageMeta.uuid,
                    'chunkCount': chunkCount
                  },
                  'metadata/' + packageName
                );
                putMetadataRequest.onsuccess = function(event) {
                  callback(packageData);
                };
                putMetadataRequest.onerror = function(error) {
                  errback(error);
                };
              }
            };
            putPackageRequest.onerror = function(error) {
              errback(error);
            };
          }
        }

        /* Check if there's a cached package, and if so whether it's the latest available */
        function checkCachedPackage(db, packageName, callback, errback) {
          var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO);
          var metadata = transaction.objectStore(METADATA_STORE_NAME);
          var getRequest = metadata.get('metadata/' + packageName);
          getRequest.onsuccess = function(event) {
            var result = event.target.result;
            if (!result) {
              return callback(false, null);
            } else {
              return callback(PACKAGE_UUID === result['uuid'], result);
            }
          };
          getRequest.onerror = function(error) {
            errback(error);
          };
        }

        function fetchCachedPackage(db, packageName, metadata, callback, errback) {
          var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO);
          var packages = transaction.objectStore(PACKAGE_STORE_NAME);

          var chunksDone = 0;
          var totalSize = 0;
          var chunkCount = metadata['chunkCount'];
          var chunks = new Array(chunkCount);

          for (var chunkId = 0; chunkId < chunkCount; chunkId++) {
            var getRequest = packages.get('package/' + packageName + '/' + chunkId);
            getRequest.onsuccess = function(event) {
              // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now
              if (chunkCount == 1) {
                callback(event.target.result);
              } else {
                chunksDone++;
                totalSize += event.target.result.byteLength;
                chunks.push(event.target.result);
                if (chunksDone == chunkCount) {
                  if (chunksDone == 1) {
                    callback(event.target.result);
                  } else {
                    var tempTyped = new Uint8Array(totalSize);
                    var byteOffset = 0;
                    for (var chunkId in chunks) {
                      var buffer = chunks[chunkId];
                      tempTyped.set(new Uint8Array(buffer), byteOffset);
                      byteOffset += buffer.byteLength;
                      buffer = undefined;
                    }
                    chunks = undefined;
                    callback(tempTyped.buffer);
                    tempTyped = undefined;
                  }
                }
              }
            };
            getRequest.onerror = function(error) {
              errback(error);
            };
          }
        }
      '''

        ret += r'''
      function fetchRemotePackage(packageName, packageSize, callback, errback) {
        var xhr = new XMLHttpRequest();
        xhr.open('GET', packageName, true);
        xhr.responseType = 'arraybuffer';
        xhr.onprogress = function(event) {
          var url = packageName;
          var size = packageSize;
          if (event.total) size = event.total;
          if (event.loaded) {
            if (!xhr.addedTotal) {
              xhr.addedTotal = true;
              if (!Module.dataFileDownloads) Module.dataFileDownloads = {};
              Module.dataFileDownloads[url] = {
                loaded: event.loaded,
                total: size
              };
            } else {
              Module.dataFileDownloads[url].loaded = event.loaded;
            }
            var total = 0;
            var loaded = 0;
            var num = 0;
            for (var download in Module.dataFileDownloads) {
              var data = Module.dataFileDownloads[download];
              total += data.total;
              loaded += data.loaded;
              num++;
            }
            total = Math.ceil(total * Module.expectedDataFileDownloads/num);
            if (Module['setStatus']) Module['setStatus']('Downloading data... (' + loaded + '/' + total + ')');
          } else if (!Module.dataFileDownloads) {
            if (Module['setStatus']) Module['setStatus']('Downloading data...');
          }
        };
        xhr.onerror = function(event) {
          throw new Error("NetworkError for: " + packageName);
        }
        xhr.onload = function(event) {
          if (xhr.status == 200 || xhr.status == 304 || xhr.status == 206 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
            var packageData = xhr.response;
            callback(packageData);
          } else {
            throw new Error(xhr.statusText + " : " + xhr.responseURL);
          }
        };
        xhr.send(null);
      };

      function handleError(error) {
        console.error('package error:', error);
      };
    '''

        code += r'''
      function processPackageData(arrayBuffer) {
        assert(arrayBuffer, 'Loading data file failed.');
        assert(arrayBuffer instanceof ArrayBuffer, 'bad input to processPackageData');
        var byteArray = new Uint8Array(arrayBuffer);
        var curr;
        %s
      };
      Module['addRunDependency']('datafile_%s');
    ''' % (use_data, shared.JS.escape_for_js_string(data_target))
        # use basename because from the browser's point of view,
        # we need to find the datafile in the same dir as the html file

        code += r'''
      if (!Module.preloadResults) Module.preloadResults = {};
    '''

        if use_preload_cache:
            code += r'''
        function preloadFallback(error) {
          console.error(error);
          console.error('falling back to default preload behavior');
          fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE, processPackageData, handleError);
        };

        openDatabase(
          function(db) {
            checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME,
              function(useCached, metadata) {
                Module.preloadResults[PACKAGE_NAME] = {fromCache: useCached};
                if (useCached) {
                  fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, metadata, processPackageData, preloadFallback);
                } else {
                  fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE,
                    function(packageData) {
                      cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID}, processPackageData,
                        function(error) {
                          console.error(error);
                          processPackageData(packageData);
                        });
                    }
                  , preloadFallback);
                }
              }
            , preloadFallback);
          }
        , preloadFallback);

        if (Module['setStatus']) Module['setStatus']('Downloading...');
      '''
        else:
            # Not using preload cache, so we might as well start the xhr ASAP,
            # potentially before JS parsing of the main codebase if it's after us.
            # Only tricky bit is the fetch is async, but also when runWithFS is called
            # is async, so we handle both orderings.
            ret += r'''
        var fetchedCallback = null;
        var fetched = Module['getPreloadedPackage'] ? Module['getPreloadedPackage'](REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE) : null;

        if (!fetched) fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE, function(data) {
          if (fetchedCallback) {
            fetchedCallback(data);
            fetchedCallback = null;
          } else {
            fetched = data;
          }
        }, handleError);
      '''

            code += r'''
        Module.preloadResults[PACKAGE_NAME] = {fromCache: false};
        if (fetched) {
          processPackageData(fetched);
          fetched = null;
        } else {
          fetchedCallback = processPackageData;
        }
      '''

    ret += '''
    function runWithFS() {
  '''
    ret += code
    ret += '''
    }
    if (Module['calledRun']) {
      runWithFS();
    } else {
      if (!Module['preRun']) Module['preRun'] = [];
      Module["preRun"].push(runWithFS); // FS is not initialized yet, wait for it
    }
  '''

    if separate_metadata:
        _metadata_template = '''
    Module['removeRunDependency']('%(metadata_file)s');
   }

   function runMetaWithFS() {
    Module['addRunDependency']('%(metadata_file)s');
    var REMOTE_METADATA_NAME = Module['locateFile'] ? Module['locateFile']('%(metadata_file)s', '') : '%(metadata_file)s';
    var xhr = new XMLHttpRequest();
    xhr.onreadystatechange = function() {
     if (xhr.readyState === 4 && xhr.status === 200) {
       loadPackage(JSON.parse(xhr.responseText));
     }
    }
    xhr.open('GET', REMOTE_METADATA_NAME, true);
    xhr.overrideMimeType('application/json');
    xhr.send(null);
   }

   if (Module['calledRun']) {
    runMetaWithFS();
   } else {
    if (!Module['preRun']) Module['preRun'] = [];
    Module["preRun"].push(runMetaWithFS);
   }
  ''' % {
            'metadata_file': os.path.basename(jsoutput + '.metadata')
        }

    else:
        _metadata_template = '''
   }
   loadPackage(%s);
  ''' % json.dumps(metadata)

    ret += '''%s
  })();
  ''' % _metadata_template

    if force or len(data_files):
        if jsoutput is None:
            print(ret)
        else:
            # Overwrite the old jsoutput file (if exists) only when its content
            # differs from the current generated one, otherwise leave the file
            # untouched preserving its old timestamp
            if os.path.isfile(jsoutput):
                with open(jsoutput) as f:
                    old = f.read()
                if old != ret:
                    with open(jsoutput, 'w') as f:
                        f.write(ret)
            else:
                with open(jsoutput, 'w') as f:
                    f.write(ret)
            if separate_metadata:
                with open(jsoutput + '.metadata', 'w') as f:
                    json.dump(metadata, f, separators=(',', ':'))

    return 0
Example #57
 def _get_key_name(self, name):
     if name.startswith("/"):
         name = name[1:]
     return posixpath.normpath(
         posixpath.join(self.settings.AWS_S3_KEY_PREFIX,
                        _to_posix_path(name)))
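
A simplified sketch of the resulting S3 key (AWS_S3_KEY_PREFIX and the lstrip here are illustrative; the method above strips a single leading slash and runs the name through _to_posix_path first):

import posixpath

AWS_S3_KEY_PREFIX = 'static'  # hypothetical setting
name = '/css/../js/app.js'
key = posixpath.normpath(posixpath.join(AWS_S3_KEY_PREFIX, name.lstrip('/')))
print(key)  # -> 'static/js/app.js'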
Example #58
 def normpath(self, path):
     path2 = posixpath.normpath(urllib.unquote(path))
     if path.endswith("/"):
         path2 += "/"
     return path2
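
The same behavior in a self-contained Python 3 form (the snippet above uses Python 2's urllib.unquote):

import posixpath
from urllib.parse import unquote

def normpath(path):
    path2 = posixpath.normpath(unquote(path))
    if path.endswith('/'):
        path2 += '/'  # normpath drops trailing slashes; restore it
    return path2

print(normpath('/a/b/../c/'))  # -> '/a/c/'
print(normpath('/a%20b/c'))    # -> '/a b/c'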
Example #59
File: resource.py Project: yjkim/hue
 def _join_uri(self, relpath):
     if relpath is None:
         return self._path
     return self._path + posixpath.normpath('/' + relpath)
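
A tiny usage sketch (the base URI here is hypothetical):

import posixpath

base = 'http://host:50070/webhdfs/v1'  # stands in for self._path
relpath = 'user/../tmp'
print(base + posixpath.normpath('/' + relpath))  # -> 'http://host:50070/webhdfs/v1/tmp'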
Example #60
def list_android_dir(path, sorted=True, sizes=False, recursive=False):
    '''List the contents of the given directory on Android.
       Returns a list of filenames if sizes=False.
       If sizes=True, returns a list of tuples (filename, int size).
       If recursive, return a list of relative paths of leaf names
       like foo/bar/baz.jpg.
    '''
    lenpath = len(path)

    if recursive:
        args = ["adb", "shell", "ls", "-lR", path]
    else:
        args = ["adb", "shell", "ls", "-l", path]

    proc = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
    stdout_lines = proc.communicate()[0].decode().split('\n')
    file_list = []
    cur_subdir = ''
    for line in stdout_lines:
        line = line.strip()
        if not line:
            continue

        # In recursive mode, each directory will be listed with a colon.
        if recursive and line.endswith(':'):
            cur_subdir = line[:-1]
            continue

        l = line.split()
        nwords = len(l)
        if not nwords:
            continue

        if line.startswith('-rw'):
            if nwords < 7:
                print("Not enough words for a file listing: %s"% l)
                continue
            # Account for filenames with spaces: anything from element 6
            # to the end is the filename.
            fname = ' '.join(l[6:])

            if recursive and cur_subdir:
                fname = posixpath.normpath(posixpath.join(cur_subdir,
                                                          fname))[lenpath-1:]
                # Depending on whether the original path ended with a slash,
                # fname might incorrectly start with one
                # because lenpath might be too small by one.
                if fname.startswith('/'):
                    fname = fname[1:]
            if sizes:
                try:
                    file_list.append((fname, int(l[3])))
                except ValueError:
                    # This could happen for the initial "Total:" line
                    pass
            else:
                file_list.append(fname)

        # elif line.startswith('drw'):
        #     print("%s is a directory" % l[-1])

        # else:
        #     print("Not a file or directory: %s" % l)

    if sorted:
        file_list.sort()

    return file_list
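
Hypothetical usage, assuming adb is on PATH and a device is attached:

# for name, size in list_android_dir('/sdcard/DCIM/', sizes=True, recursive=True):
#     print(name, size)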