Example #1
def _MakeChannelDict(channel_name):
    channel_dict = {
        'channels': [{
            'name': name
        } for name in BranchUtility.GetAllChannelNames()],
        'current': channel_name
    }
    for channel in channel_dict['channels']:
        if channel['name'] == channel_name:
            channel['isCurrent'] = True
    return channel_dict
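
Every example in this listing iterates over BranchUtility.GetAllChannelNames(). A minimal stub of that dependency, plus the dictionary this helper builds, is sketched here so the snippet can be read in isolation; the channel names and their order are assumptions, not taken from the source.

# Assumed stub for illustration only; the real BranchUtility lives elsewhere in
# the docserver and may return different names in a different order.
class BranchUtility(object):
    @staticmethod
    def GetAllChannelNames():
        return ('stable', 'beta', 'dev', 'trunk')

# With that stub, _MakeChannelDict('beta') returns:
# {
#     'channels': [{'name': 'stable'},
#                  {'name': 'beta', 'isCurrent': True},
#                  {'name': 'dev'},
#                  {'name': 'trunk'}],
#     'current': 'beta'
# }
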
Example #2
    def _RedirectOldHosts(self, host, path):
        '''Redirect paths from the old code.google.com to the new
        developer.chrome.com, retaining elements like the channel and https, if
        used.
        '''
        if urlsplit(host).hostname != 'code.google.com':
            return None

        path = path.split('/')
        if path and path[0] == 'chrome':
            path.pop(0)

        for channel in BranchUtility.GetAllChannelNames():
            if channel in path:
                path.remove(channel)
                path.insert(0, channel)
                break

        return 'https://developer.chrome.com/' + posixpath.join(*path)
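
Roughly, the method rewrites an old code.google.com URL onto developer.chrome.com and hoists any channel segment to the front of the path. A hedged trace with hypothetical inputs:

# Assuming 'beta' is one of the channel names:
# _RedirectOldHosts(self, 'http://code.google.com', 'chrome/extensions/beta/storage')
# drops the leading 'chrome', moves 'beta' to the front, and returns
# 'https://developer.chrome.com/beta/extensions/storage'.
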
Example #3
    def _RedirectFromCodeDotGoogleDotCom(self):
        host, path = (self._request.host, self._request.path)

        if host not in ('http://code.google.com', 'https://code.google.com'):
            return None

        new_host = 'http://developer.chrome.com'

        # switch to https if necessary
        if host.startswith('https'):
            new_host = new_host.replace('http', 'https', 1)

        new_path = path.split('/')
        if len(new_path) > 0 and new_path[0] == 'chrome':
            new_path.pop(0)
        for channel in BranchUtility.GetAllChannelNames():
            if channel in new_path:
                position = new_path.index(channel)
                new_path.pop(position)
                new_path.insert(0, channel)
        return Response.Redirect('/'.join([new_host] + new_path))
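
For comparison with example #2, a hedged trace of this older variant, again with hypothetical request values:

# With self._request.host == 'https://code.google.com' and
# self._request.path == 'chrome/extensions/beta/storage', new_host is switched
# to https, 'chrome' is popped, 'beta' is moved to the front of new_path, and
# the method returns
# Response.Redirect('https://developer.chrome.com/beta/extensions/storage').
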
Example #4
 def GetAllChannelInfo(self):
     return tuple(
         self.GetChannelInfo(channel)
         for channel in BranchUtility.GetAllChannelNames())
Example #5
    def testCronAndPublicFiles(self):
        '''Runs cron then requests every public file. Cron needs to be run first
        because the public file requests are offline.
        '''
        if _EXPLICIT_TEST_FILES is not None:
            return

        print('Running cron...')
        start_time = time.time()
        try:
            response = Handler(Request.ForTest('/_cron/stable')).Get()
            self.assertEqual(200, response.status)
            self.assertEqual('Success', response.content.ToString())
        finally:
            print('Took %s seconds' % (time.time() - start_time))

        print("Checking for broken links...")
        start_time = time.time()
        link_error_detector = LinkErrorDetector(
            LocalFileSystem(os.path.join(sys.path[0], os.pardir, os.pardir)),
            lambda path: Handler(Request.ForTest(path)).Get(),
            'templates/public',
            ('extensions/index.html', 'apps/about_apps.html'))

        broken_links = link_error_detector.GetBrokenLinks()
        if broken_links:
            # TODO(jshumway): Test should fail when broken links are detected.
            print('Warning: Found %d broken links:' % (len(broken_links)))
            print(StringifyBrokenLinks(broken_links))

        print('Took %s seconds.' % (time.time() - start_time))

        print('Searching for orphaned pages...')
        start_time = time.time()
        orphaned_pages = link_error_detector.GetOrphanedPages()
        if orphaned_pages:
            # TODO(jshumway): Test should fail when orphaned pages are detected.
            print('Warning: Found %d orphaned pages:' % len(orphaned_pages))
            for page in orphaned_pages:
                print(page)
        print('Took %s seconds.' % (time.time() - start_time))

        public_files = _GetPublicFiles()

        print('Rendering %s public files...' % len(public_files.keys()))
        start_time = time.time()
        try:
            for path, content in public_files.iteritems():
                if path.endswith('redirects.json'):
                    continue

                def check_result(response):
                    self.assertEqual(
                        200, response.status,
                        'Got %s when rendering %s' % (response.status, path))
                    # This is reaaaaally rough since usually these will be tiny templates
                    # that render large files. At least it'll catch zero-length responses.
                    self.assertTrue(
                        len(response.content) >= len(content),
                        'Content was "%s" when rendering %s' %
                        (response.content, path))

                check_result(Handler(Request.ForTest(path)).Get())

                # Make sure that leaving out the .html will temporarily redirect to the
                # path with the .html.
                if path.startswith(('apps/', 'extensions/')):
                    redirect_result = Handler(
                        Request.ForTest(posixpath.splitext(path)[0])).Get()
                    self.assertEqual((path, False),
                                     redirect_result.GetRedirect())

                # Make sure including a channel will permanently redirect to the same
                # path without a channel.
                for channel in BranchUtility.GetAllChannelNames():
                    redirect_result = Handler(
                        Request.ForTest('%s/%s' % (channel, path))).Get()
                    self.assertEqual((path, True),
                                     redirect_result.GetRedirect())

                # Samples are internationalized, test some locales.
                if path.endswith('/samples.html'):
                    for lang in ['en-US', 'es', 'ar']:
                        check_result(
                            Handler(
                                Request.ForTest(path,
                                                headers={
                                                    'Accept-Language':
                                                    '%s;q=0.8' % lang
                                                })).Get())
        finally:
            print('Took %s seconds' % (time.time() - start_time))
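
Both redirect checks in the loop above rely on GetRedirect() returning a (path, permanent) pair. Roughly, for a hypothetical public file:

# For a hypothetical public file 'extensions/storage.html':
#   - requesting 'extensions/storage' (without .html) should redirect
#     temporarily: GetRedirect() == ('extensions/storage.html', False)
#   - requesting 'beta/extensions/storage.html' (with a channel prefix) should
#     redirect permanently: GetRedirect() == ('extensions/storage.html', True)
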
Example #6
    def testCronAndPublicFiles(self):
        '''Runs cron then requests every public file. Cron needs to be run first
        because the public file requests are offline.
        '''
        if _EXPLICIT_TEST_FILES is not None:
            return

        print('Running cron...')
        start_time = time.time()
        try:
            response = Handler(Request.ForTest('/_cron')).Get()
            if response:
                self.assertEqual(200, response.status)
                self.assertEqual('Success', response.content.ToString())
            else:
                self.fail('No response for _cron')
        finally:
            print('Took %s seconds' % (time.time() - start_time))

        # TODO(kalman): Re-enable this, but it takes about an hour at the moment,
        # presumably because every page now has a lot of links on it from the
        # topnav.

        #print("Checking for broken links...")
        #start_time = time.time()
        #link_error_detector = LinkErrorDetector(
        #    # TODO(kalman): Use of ChrootFileSystem here indicates a hack. Fix.
        #    ChrootFileSystem(LocalFileSystem.Create(), EXTENSIONS),
        #    lambda path: Handler(Request.ForTest(path)).Get(),
        #    'templates/public',
        #    ('extensions/index.html', 'apps/about_apps.html'))

        #broken_links = link_error_detector.GetBrokenLinks()
        #if broken_links:
        #  print('Found %d broken links.' % (
        #    len(broken_links)))
        #  if _VERBOSE:
        #    print(StringifyBrokenLinks(broken_links))

        #broken_links_set = set(broken_links)

        #known_broken_links_path = os.path.join(
        #    Server2Path('known_broken_links.json'))
        #try:
        #  with open(known_broken_links_path, 'r') as f:
        #    # The JSON file converts tuples and sets into lists, and for this
        #    # set union/difference logic they need to be converted back.
        #    known_broken_links = set(tuple(item) for item in json.load(f))
        #except IOError:
        #  known_broken_links = set()

        #newly_broken_links = broken_links_set - known_broken_links
        #fixed_links = known_broken_links - broken_links_set

        #print('Took %s seconds.' % (time.time() - start_time))

        #print('Searching for orphaned pages...')
        #start_time = time.time()
        #orphaned_pages = link_error_detector.GetOrphanedPages()
        #if orphaned_pages:
        #  # TODO(jshumway): Test should fail when orphaned pages are detected.
        #  print('Found %d orphaned pages:' % len(orphaned_pages))
        #  for page in orphaned_pages:
        #    print(page)
        #print('Took %s seconds.' % (time.time() - start_time))

        public_files = _GetPublicFiles()

        print('Rendering %s public files...' % len(public_files.keys()))
        start_time = time.time()
        try:
            for path, content in public_files.iteritems():
                AssertIsValid(path)
                if path.endswith('redirects.json'):
                    continue

                # The non-example html and md files are served without their file
                # extensions.
                path_without_ext, ext = posixpath.splitext(path)
                if (ext in ('.html', '.md') and '/examples/' not in path
                        and path != SITE_VERIFICATION_FILE):
                    path = path_without_ext

                def check_result(response):
                    self.assertEqual(
                        200, response.status,
                        'Got %s when rendering %s' % (response.status, path))

                    # This is reaaaaally rough since usually these will be tiny templates
                    # that render large files. At least it'll catch zero-length responses.
                    self.assertTrue(
                        len(response.content) >= len(content),
                        'Rendered content length was %s vs template content length %s '
                        'when rendering %s' %
                        (len(response.content), len(content), path))

                check_result(Handler(Request.ForTest(path)).Get())

                if path.startswith(('apps/', 'extensions/')):
                    # Make sure that adding the .html will temporarily redirect to
                    # the path without the .html for APIs and articles.
                    if '/examples/' not in path:
                        redirect_response = Handler(
                            Request.ForTest(path + '.html')).Get()
                        self.assertEqual(
                            ('/' + path, False),
                            redirect_response.GetRedirect(),
                            '%s.html did not (temporarily) redirect to %s (status %s)'
                            % (path, path, redirect_response.status))

                    # Make sure including a channel will permanently redirect to the same
                    # path without a channel.
                    for channel in BranchUtility.GetAllChannelNames():
                        redirect_response = Handler(
                            Request.ForTest(posixpath.join(channel,
                                                           path))).Get()
                        self.assertEqual(
                            ('/' + path, True),
                            redirect_response.GetRedirect(),
                            '%s/%s did not (permanently) redirect to %s (status %s)'
                            % (channel, path, path, redirect_response.status))

                # Samples are internationalized, test some locales.
                if path.endswith('/samples'):
                    for lang in ('en-US', 'es', 'ar'):
                        check_result(
                            Handler(
                                Request.ForTest(path,
                                                headers={
                                                    'Accept-Language':
                                                    '%s;q=0.8' % lang
                                                })).Get())
        finally:
            print('Took %s seconds' % (time.time() - start_time))
Example #7
    def Canonicalize(self, path):
        '''Returns the canonical path for |path|, and whether that path is a
        permanent canonicalisation (e.g. when we redirect from a channel to a
        channel-less URL) or temporary (e.g. when we redirect from an apps-only API
        to an extensions one - we may at some point enable it for extensions).
        '''
        class ReturnType(object):
            def __init__(self, path, permanent):
                self.path = path
                self.permanent = permanent

            # Catch incorrect comparisons by disabling ==/!=.
            def __eq__(self, _):
                raise NotImplementedError()

            def __ne__(self, _):
                raise NotImplementedError()

        # Strip any channel info off it. There are no channels anymore.
        for channel_name in BranchUtility.GetAllChannelNames():
            channel_prefix = channel_name + '/'
            if path.startswith(channel_prefix):
                # Redirect now so that we can set the permanent-redirect bit.  Channel
                # redirects are the only things that should be permanent redirects;
                # anything else *could* change, so is temporary.
                return ReturnType(path[len(channel_prefix):], True)

        # No further work needed for static.
        if path.startswith('static/'):
            return ReturnType(path, False)

        # People go to just "extensions" or "apps". Redirect to the directory.
        if path in ('extensions', 'apps'):
            return ReturnType(path + '/', False)

        # The rest of this function deals with trying to figure out what API page
        # for extensions/apps to redirect to, if any. We see a few different cases
        # here:
        #  - Unqualified names ("browserAction.html"). These are easy to resolve;
        #    figure out whether it's an extension or app API and redirect.
        #     - but what if it's both? Well, assume extensions. Maybe later we can
        #       check analytics and see which is more popular.
        #  - Wrong names ("apps/browserAction.html"). This really does happen,
        #    damn it, so do the above logic but record which is the default.
        if path.startswith(('extensions/', 'apps/')):
            default_platform, reference_path = path.split('/', 1)
        else:
            default_platform, reference_path = ('extensions', path)

        try:
            apps_public = self._public_apis.GetFromFileListing('/'.join(
                (svn_constants.PUBLIC_TEMPLATE_PATH, 'apps')))
            extensions_public = self._public_apis.GetFromFileListing('/'.join(
                (svn_constants.PUBLIC_TEMPLATE_PATH, 'extensions')))
        except FileNotFoundError:
            # Probably offline.
            logging.warning(traceback.format_exc())
            return ReturnType(path, False)

        simple_reference_path = _SimplifyFileName(reference_path)
        apps_path = apps_public.get(simple_reference_path)
        extensions_path = extensions_public.get(simple_reference_path)

        if apps_path is None:
            if extensions_path is None:
                # No idea. Just return the original path. It'll probably 404.
                pass
            else:
                path = 'extensions/%s' % extensions_path
        else:
            if extensions_path is None:
                path = 'apps/%s' % apps_path
            else:
                assert apps_path == extensions_path
                path = '%s/%s' % (default_platform, apps_path)

        return ReturnType(path, False)
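
The channel strip at the top of Canonicalize is the only permanent redirect it issues. A standalone sketch of just that branch, with the channel names again assumed:

def _StripChannel(path, channel_names=('stable', 'beta', 'dev', 'trunk')):
    # Sketch of the channel-stripping branch above: returns (new_path, permanent).
    for channel_name in channel_names:
        channel_prefix = channel_name + '/'
        if path.startswith(channel_prefix):
            return path[len(channel_prefix):], True
    return path, False

# _StripChannel('beta/extensions/storage') == ('extensions/storage', True)
# _StripChannel('extensions/storage') == ('extensions/storage', False)
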
Example #8
def Parse(features_json):
    '''Process JSON from a _features.json file, standardizing it into a dictionary
    of Features.
    '''
    features = {}

    def ignore_feature(name, value):
        '''Returns true if this feature should be ignored. Features are ignored if
        they are only available to whitelisted apps or component extensions/apps, as
        in these cases the APIs are not available to public developers.

        Private APIs are also unavailable to public developers, but logic elsewhere
        makes sure they are not listed. So they shouldn't be ignored via this
        mechanism.
        '''
        if name.endswith('Private'):
            return False

        return value.get('location') == 'component' or 'whitelist' in value

    for name, value in deepcopy(features_json).iteritems():
        # Some feature names correspond to a list, typically because they're
        # whitelisted in stable for certain extensions and available in dev for
        # everybody else. Force a list down to a single feature by attempting to
        # remove the entries that don't affect the typical usage of an API.
        if isinstance(value, list):
            available_values = [
                subvalue for subvalue in value
                if not ignore_feature(name, subvalue)
            ]
            if not available_values:
                continue

            if len(available_values) == 1:
                value = available_values[0]
            else:
                # Multiple available values probably implies different feature
                # configurations for apps vs extensions. Currently, this is 'commands'.
                # To get the ball rolling, add a hack to combine the extension types.
                # See http://crbug.com/316194.
                extension_types = set()
                for available_value in available_values:
                    extension_types.update(
                        available_value.get('extension_types', ()))

                # For the base value, select the one with the most recent availability.
                channel_names = BranchUtility.GetAllChannelNames()
                available_values.sort(key=lambda v: channel_names.index(
                    v.get('channel', 'stable')))
                value = available_values[0]

                value['extension_types'] = list(extension_types)

        if ignore_feature(name, value):
            continue

        # Now we transform 'extension_types' into the more useful 'platforms'.
        #
        # But first, note that 'platforms' has a double meaning. In the docserver
        # model (what we're in the process of generating) it means 'apps' vs
        # 'extensions'. In the JSON features as read from Chrome it means 'win' vs
        # 'mac'. Ignore the latter.
        value.pop('platforms', None)
        extension_types = value.pop('extension_types', None)

        platforms = []
        if extension_types is not None:
            platforms = _GetPlatformsForExtensionTypes(extension_types)

        features[name] = {
            'name': name,
            'platforms': platforms,
        }
        features[name].update(value)

    return features
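
A hedged illustration of what Parse produces for a hypothetical _features.json; the field values are illustrative, and the exact 'platforms' output depends on _GetPlatformsForExtensionTypes, which is not shown here:

# features_json = {
#     'alarms': {'channel': 'stable',
#                'extension_types': ['extension', 'platform_app']},
#     'someComponentOnlyApi': {'channel': 'dev', 'location': 'component'},
# }
#
# Parse(features_json) drops 'someComponentOnlyApi' (component-only, and its
# name does not end in 'Private') and returns roughly:
# {
#     'alarms': {'name': 'alarms',
#                'platforms': [...],  # derived from extension_types
#                'channel': 'stable'}
# }
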
Example #9
  def testCronAndPublicFiles(self):
    '''Runs cron then requests every public file. Cron needs to be run first
    because the public file requests are offline.
    '''
    if _EXPLICIT_TEST_FILES is not None:
      return

    print('Running cron...')
    start_time = time.time()
    try:
      response = Handler(Request.ForTest('/_cron')).Get()
      self.assertEqual(200, response.status)
      self.assertEqual('Success', response.content.ToString())
    finally:
      print('Took %s seconds' % (time.time() - start_time))

    print("Checking for broken links...")
    start_time = time.time()
    link_error_detector = LinkErrorDetector(
        # TODO(kalman): Use of ChrootFileSystem here indicates a hack. Fix.
        ChrootFileSystem(LocalFileSystem.Create(), EXTENSIONS),
        lambda path: Handler(Request.ForTest(path)).Get(),
        'templates/public',
        ('extensions/index.html', 'apps/about_apps.html'))

    broken_links = link_error_detector.GetBrokenLinks()
    if broken_links and _VERBOSE:
      print('The broken links are:')
      print(StringifyBrokenLinks(broken_links))

    broken_links_set = set(broken_links)

    known_broken_links_path = os.path.join(
        sys.path[0], 'known_broken_links.json')
    try:
      with open(known_broken_links_path, 'r') as f:
        # The JSON file converts tuples and sets into lists, and for this
        # set union/difference logic they need to be converted back.
        known_broken_links = set(tuple(item) for item in json.load(f))
    except IOError:
      known_broken_links = set()

    newly_broken_links = broken_links_set - known_broken_links
    fixed_links = known_broken_links - broken_links_set

    if _REBASE:
      print('Rebasing broken links with %s newly broken and %s fixed links.' %
            (len(newly_broken_links), len(fixed_links)))
      with open(known_broken_links_path, 'w') as f:
        json.dump(broken_links, f,
                  indent=2, separators=(',', ': '), sort_keys=True)
    else:
      if fixed_links or newly_broken_links:
        print('Found %s broken links, and some have changed. '
              'If this is acceptable or expected then run %s with the --rebase '
              'option.' % (len(broken_links), os.path.split(__file__)[-1]))
      elif broken_links:
        print('Found %s broken links, but there were no changes.' %
              len(broken_links))
      if fixed_links:
        print('%s broken links have been fixed:' % len(fixed_links))
        print(StringifyBrokenLinks(fixed_links))
      if newly_broken_links:
        print('There are %s new broken links:' % len(newly_broken_links))
        print(StringifyBrokenLinks(newly_broken_links))
        self.fail('See logging for details.')

    print('Took %s seconds.' % (time.time() - start_time))

    print('Searching for orphaned pages...')
    start_time = time.time()
    orphaned_pages = link_error_detector.GetOrphanedPages()
    if orphaned_pages:
      # TODO(jshumway): Test should fail when orphaned pages are detected.
      print('Warning: Found %d orphaned pages:' % len(orphaned_pages))
      for page in orphaned_pages:
        print(page)
    print('Took %s seconds.' % (time.time() - start_time))

    public_files = _GetPublicFiles()

    print('Rendering %s public files...' % len(public_files.keys()))
    start_time = time.time()
    try:
      for path, content in public_files.iteritems():
        if path.endswith('redirects.json'):
          continue
        def check_result(response):
          self.assertEqual(200, response.status,
              'Got %s when rendering %s' % (response.status, path))
          # This is reaaaaally rough since usually these will be tiny templates
          # that render large files. At least it'll catch zero-length responses.
          self.assertTrue(len(response.content) >= len(content),
              'Content was "%s" when rendering %s' % (response.content, path))

        check_result(Handler(Request.ForTest(path)).Get())

        # Make sure that leaving out the .html will temporarily redirect to the
        # path with the .html.
        if path.startswith(('apps/', 'extensions/')):
          redirect_result = Handler(
              Request.ForTest(posixpath.splitext(path)[0])).Get()
          self.assertEqual((path, False), redirect_result.GetRedirect())

        # Make sure including a channel will permanently redirect to the same
        # path without a channel.
        for channel in BranchUtility.GetAllChannelNames():
          redirect_result = Handler(
              Request.ForTest('%s/%s' % (channel, path))).Get()
          self.assertEqual((path, True), redirect_result.GetRedirect())

        # Samples are internationalized, test some locales.
        if path.endswith('/samples.html'):
          for lang in ['en-US', 'es', 'ar']:
            check_result(Handler(Request.ForTest(
                path,
                headers={'Accept-Language': '%s;q=0.8' % lang})).Get())
    finally:
      print('Took %s seconds' % (time.time() - start_time))