def Stat(self, path):
  '''Stats |path| in this file system's bucket.

  If stat'ing fails with an authorization error, a warning is emitted via
  self._warnAboutAuthError() before the error is re-raised to the caller.
  '''
  AssertIsValid(path)
  try:
    stat_info = _CreateStatInfo(self._bucket, path)
  except errors.AuthorizationError:
    self._warnAboutAuthError()
    raise
  return stat_info
def ReadSingle(self, path):
  '''Reads a single file from the FileSystem. Returns a Future with the same
  rules as Read().
  '''
  AssertIsValid(path)
  # Delegate to the batch Read() and pluck this path's entry out of its
  # result when the Future is resolved.
  batch_future = self.Read([path])
  return Future(delegate=Gettable(lambda: batch_future.Get()[path]))
def GetContentAndType(self, path):
  '''Returns the ContentAndType of the file at |path|.

  Zip requests (when a directory zipper is configured) are served eagerly
  from the zipper; otherwise the path may be rewritten to a file with one of
  the default extensions (or a directory 'index' file) before being served
  from the content cache.
  '''
  AssertIsValid(path)
  base, ext = posixpath.splitext(path)

  # Check for a zip file first, if zip is enabled.
  if self._directory_zipper and ext == '.zip':
    zip_future = self._directory_zipper.Zip(ToDirectory(base))
    return Future(callback=lambda: ContentAndType(zip_future.Get(),
                                                  'application/zip',
                                                  None))

  # If there is no file extension, look for a file with one of the default
  # extensions. If one cannot be found, check if the path is a directory.
  # If it is, then check for an index file with one of the default
  # extensions.
  if not ext:
    new_path = self._AddExt(path)
    # Add a trailing / to check if it is a directory and not a file with
    # no extension.
    if new_path is None and self.file_system.Exists(ToDirectory(path)).Get():
      new_path = self._AddExt(Join(path, 'index'))
    # If an index file wasn't found in this directory then we're never going
    # to find a file.
    if new_path is None:
      return FileNotFoundError.RaiseInFuture('"%s" is a directory' % path)
    # |new_path| is guaranteed non-None here (the None case returned above),
    # so the original redundant "is not None" guard has been removed.
    path = new_path

  return self._content_cache.GetFromFile(path)
def ExtractPlatformFromURL(url):
  '''Returns 'apps' or 'extensions' depending on the URL.
  '''
  AssertIsValid(url)
  # The platform is the first URL segment; None if it isn't a known platform.
  candidate = url.split('/', 1)[0]
  return candidate if candidate in GetPlatforms() else None
def __init__(self, bucket, debug_access_token=None, debug_bucket_prefix=None):
  '''Creates a file system backed by the GCS |bucket|.

  |debug_access_token|, if given, is installed as the access token via
  common.set_access_token. |debug_bucket_prefix|, if given, is prepended to
  the bucket name. Both are debug-only overrides.
  '''
  self._bucket = bucket
  if debug_access_token:
    # Lazy %-args: logging only formats the message if DEBUG is enabled.
    # NOTE(review): this logs a credential; presumably acceptable because it
    # is a debug-only token — confirm production tokens never flow here.
    logging.debug('gcs: using debug access token: %s', debug_access_token)
    common.set_access_token(debug_access_token)
  if debug_bucket_prefix:
    logging.debug('gcs: prefixing all bucket names with %s',
                  debug_bucket_prefix)
    self._bucket = debug_bucket_prefix + self._bucket
  AssertIsValid(self._bucket)
def resolve():
  '''Resolves every path in |paths| against the local base directory:
  paths that are '' or end with '/' get a directory listing, everything
  else gets the file's contents.
  '''
  contents = {}
  for path in paths:
    AssertIsValid(path)
    full_path = os.path.join(
        self._base_path, _ConvertToFilepath(path).lstrip(os.sep))
    is_directory = path == '' or path.endswith('/')
    contents[path] = _ListDir(full_path) if is_directory else (
        _ReadFile(full_path))
  return contents
def update_result(item, path):
  # Recursively flattens |item| into the enclosing |result| dict: strings
  # map directly to their content, dicts become directory listings keyed by
  # a trailing-'/' path, with each entry recursed into.
  AssertIsValid(path)
  if isinstance(item, basestring):
    result[path] = item
  elif isinstance(item, dict):
    if path != '':
      path += '/'
    # Directory listing: plain names for files, a trailing '/' for subdirs.
    result[path] = [p if isinstance(content, basestring) else (p + '/')
                    for p, content in item.iteritems()]
    for subpath, subitem in item.iteritems():
      update_result(subitem, path + subpath)
  else:
    raise ValueError('Unsupported item type: %s' % type(item))
def _FindFileForPath(self, path):
  '''Finds the real file backing |path|. This may require looking for the
  correct file extension, or looking for an 'index' file if it's a directory.
  Returns None if no path is found.
  '''
  AssertIsValid(path)
  _, ext = posixpath.splitext(path)

  if ext:
    # There was already an extension, trust that it's a path. Elsewhere
    # up the stack this will be caught if it's not.
    return Future(value=path)

  def find_file_with_name(name):
    '''Tries to find a file in the file system called |name| with one of the
    default extensions of this content provider.
    If none is found, returns None.
    '''
    # Candidate paths: |name| with each default extension appended, probed
    # for existence in parallel below.
    paths = [name + ext for ext in self._default_extensions]
    def get_first_path_which_exists(existence):
      # |existence| is positionally aligned with |paths| (one bool each),
      # hence the zip; first hit wins.
      for exists, path in zip(existence, paths):
        if exists:
          return path
      return None
    return (All(self.file_system.Exists(path) for path in paths)
            .Then(get_first_path_which_exists))

  def find_index_file():
    '''Tries to find an index file in |path|, if |path| is a directory.
    If not, or if there is no index file, returns None.
    '''
    def get_index_if_directory_exists(directory_exists):
      if not directory_exists:
        return None
      return find_file_with_name(Join(path, 'index'))
    return (self.file_system.Exists(ToDirectory(path))
            .Then(get_index_if_directory_exists))

  # Try to find a file with the right name. If not, and it's a directory,
  # look for an index file in that directory. If nothing at all is found,
  # return the original |path| - its nonexistence will be caught up the stack.
  return (find_file_with_name(path)
          .Then(lambda found: found or find_index_file())
          .Then(lambda found: found or path))
def GetCanonicalPath(self, path):
  '''Gets the canonical location of |path|.

  This class is tolerant of spelling errors and missing files that are in
  other directories, and this returns the correct/canonical path for those.

  For example, the canonical path of "browseraction" is probably
  "extensions/browserAction.html".

  Note that the canonical path is relative to this content provider i.e.
  given relative to |path|. It does not add the "serveFrom" prefix which
  would have been pulled out in ContentProviders, callers must do that
  themselves.
  '''
  AssertIsValid(path)
  base, ext = posixpath.splitext(path)
  if ext == '.zip' and self._directory_zipper:
    # The canonical location of zip files is the canonical location of the
    # directory to zip + '.zip'.
    canonical_dir = self._path_canonicalizer.Canonicalize(base + '/')
    return canonical_dir.rstrip('/') + ext
  return self._path_canonicalizer.Canonicalize(path)
def testCronAndPublicFiles(self):
  '''Runs cron then requests every public file. Cron needs to be run first
  because the public file requests are offline.
  '''
  if _EXPLICIT_TEST_FILES is not None:
    return

  print('Running cron...')
  start_time = time.time()
  try:
    response = Handler(Request.ForTest('/_cron')).Get()
    if response:
      self.assertEqual(200, response.status)
      self.assertEqual('Success', response.content.ToString())
    else:
      self.fail('No response for _cron')
  finally:
    print('Took %s seconds' % (time.time() - start_time))

  # TODO(kalman): Re-enable this, but it takes about an hour at the moment,
  # presumably because every page now has a lot of links on it from the
  # topnav.
  #print("Checking for broken links...")
  #start_time = time.time()
  #link_error_detector = LinkErrorDetector(
  #    # TODO(kalman): Use of ChrootFileSystem here indicates a hack. Fix.
  #    ChrootFileSystem(LocalFileSystem.Create(), EXTENSIONS),
  #    lambda path: Handler(Request.ForTest(path)).Get(),
  #    'templates/public',
  #    ('extensions/index.html', 'apps/about_apps.html'))
  #broken_links = link_error_detector.GetBrokenLinks()
  #if broken_links:
  #  print('Found %d broken links.' % (
  #      len(broken_links)))
  #  if _VERBOSE:
  #    print(StringifyBrokenLinks(broken_links))
  #broken_links_set = set(broken_links)
  #known_broken_links_path = os.path.join(
  #    Server2Path('known_broken_links.json'))
  #try:
  #  with open(known_broken_links_path, 'r') as f:
  #    # The JSON file converts tuples and sets into lists, and for this
  #    # set union/difference logic they need to be converted back.
  #    known_broken_links = set(tuple(item) for item in json.load(f))
  #except IOError:
  #  known_broken_links = set()
  #newly_broken_links = broken_links_set - known_broken_links
  #fixed_links = known_broken_links - broken_links_set
  #print('Took %s seconds.' % (time.time() - start_time))

  #print('Searching for orphaned pages...')
  #start_time = time.time()
  #orphaned_pages = link_error_detector.GetOrphanedPages()
  #if orphaned_pages:
  #  # TODO(jshumway): Test should fail when orphaned pages are detected.
  #  print('Found %d orphaned pages:' % len(orphaned_pages))
  #  for page in orphaned_pages:
  #    print(page)
  #print('Took %s seconds.' % (time.time() - start_time))

  public_files = _GetPublicFiles()

  print('Rendering %s public files...' % len(public_files.keys()))
  start_time = time.time()
  try:
    for path, content in public_files.iteritems():
      AssertIsValid(path)
      if path.endswith('redirects.json'):
        continue

      # The non-example html and md files are served without their file
      # extensions.
      path_without_ext, ext = posixpath.splitext(path)
      if (ext in ('.html', '.md') and
          '/examples/' not in path and
          path != SITE_VERIFICATION_FILE):
        path = path_without_ext

      # Closure over the loop's |path| and |content|; only safe because it's
      # invoked synchronously within the same iteration.
      def check_result(response):
        self.assertEqual(200, response.status,
                         'Got %s when rendering %s' % (response.status, path))
        # This is reaaaaally rough since usually these will be tiny templates
        # that render large files. At least it'll catch zero-length responses.
        self.assertTrue(len(response.content) >= len(content),
                        'Rendered content length was %s vs template content '
                        'length %s when rendering %s' % (
                            len(response.content), len(content), path))

      check_result(Handler(Request.ForTest(path)).Get())

      if path.startswith(('apps/', 'extensions/')):
        # Make sure that adding the .html will temporarily redirect to
        # the path without the .html for APIs and articles.
        if '/examples/' not in path:
          redirect_response = Handler(Request.ForTest(path + '.html')).Get()
          self.assertEqual(
              ('/' + path, False), redirect_response.GetRedirect(),
              '%s.html did not (temporarily) redirect to %s (status %s)' %
                  (path, path, redirect_response.status))

        # Make sure including a channel will permanently redirect to the same
        # path without a channel.
        for channel in BranchUtility.GetAllChannelNames():
          redirect_response = Handler(
              Request.ForTest(posixpath.join(channel, path))).Get()
          self.assertEqual(
              ('/' + path, True), redirect_response.GetRedirect(),
              '%s/%s did not (permanently) redirect to %s (status %s)' %
                  (channel, path, path, redirect_response.status))

      # Samples are internationalized, test some locales.
      if path.endswith('/samples'):
        for lang in ('en-US', 'es', 'ar'):
          check_result(
              Handler(Request.ForTest(
                  path,
                  headers={'Accept-Language': '%s;q=0.8' % lang})).Get())
  finally:
    print('Took %s seconds' % (time.time() - start_time))
def Stat(self, path):
  '''Validates |path| and returns the stat info produced by this instance's
  _CreateStatInfo helper.
  '''
  AssertIsValid(path)
  stat_info = self._CreateStatInfo(path)
  return stat_info
def __init__(self, bucket, debug_bucket_prefix=None):
  '''Creates a file system for |bucket|, with the access token and last
  commit hash initially unset.
  '''
  # NOTE(review): |debug_bucket_prefix| is accepted but never applied here,
  # unlike the sibling constructor that prepends it to the bucket name —
  # confirm whether ignoring it is intentional.
  self._access_token = None
  self._last_commit_hash = None
  self._bucket = bucket
  AssertIsValid(self._bucket)