  def _CreateDataSources(self, request):
    server_instance = self._server_instance
    data_sources = CreateDataSources(server_instance, request=request)
    data_sources.update({
      'samples': server_instance.samples_data_source_factory.Create(request),
    })
    return data_sources
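The merged dictionary built above is what the Render() examples further down use as their template context. A minimal sketch of that flow, assuming a hypothetical handler instance of the surrounding class and an already-compiled template (neither name appears in the source):

# Illustrative sketch only -- handler, template, and request are assumed to exist.
data_sources = handler._CreateDataSources(request)
# The per-request 'samples' source sits alongside the shared DataSources, so a
# template can reference either kind of entry in the same way.
render_data = template.Render(data_sources)
print(render_data.text)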
Example #3
  def _GetImpl(self):
    # Cron strategy:
    #
    # Collect all DataSources, the PlatformBundle, the ContentProviders, and
    # any other statically rendered content (e.g. examples content),
    # and spin up taskqueue tasks which will refresh any cached data relevant
    # to these assets.
    #
    # TODO(rockot/kalman): At the moment examples are not actually refreshed
    # because they're too slow.

    _log.info('starting')

    server_instance = self._GetSafeServerInstance()
    master_fs = server_instance.host_file_system_provider.GetMaster()
    master_commit = master_fs.GetCommitID().Get()

    # This is the guy that would be responsible for refreshing the cache of
    # examples. Here for posterity, hopefully it will be added to the targets
    # below someday.
    render_refresher = RenderRefresher(server_instance, self._request)

    # Get the default taskqueue
    queue = taskqueue.Queue()

    # GAE documentation specifies that it's bad to add tasks to a queue
    # within one second of purging. We wait 2 seconds, because we like
    # to go the extra mile.
    queue.purge()
    time.sleep(2)

    success = True
    try:
      data_sources = CreateDataSources(server_instance)
      targets = (data_sources.items() +
                 [('content_providers', server_instance.content_providers),
                  ('platform_bundle', server_instance.platform_bundle)])
      title = 'initializing %s parallel targets' % len(targets)
      _log.info(title)
      timer = Timer()
      for name, target in targets:
        refresh_paths = target.GetRefreshPaths()
        for path in refresh_paths:
          queue.add(taskqueue.Task(url='/_refresh/%s/%s' % (name, path),
                                   params={'commit': master_commit}))
      _log.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
    except:
      # This should never actually happen (each cron step does its own
      # conservative error checking), so re-raise no matter what it is.
      _log.error('uncaught error: %s' % traceback.format_exc())
      success = False
      raise
    finally:
      _log.info('finished (%s)', 'success' if success else 'FAILED')
      return (Response.Ok('Success') if success else
              Response.InternalError('Failure'))
Example #5
  def Render(self,
             template,
             request,
             data_sources=None,
             additional_context=None):
    '''Renders |template| using |request|.

    Specify |data_sources| to only include the DataSources with the given names
    when rendering the template.

    Specify |additional_context| to inject additional template context when
    rendering the template.
    '''
    assert isinstance(template, Handlebar), type(template)
    render_context = CreateDataSources(self._server_instance, request)
    if data_sources is not None:
      render_context = dict((name, d) for name, d in render_context.iteritems()
                            if name in data_sources)
    render_context.update({
      'apps_samples_url': GITHUB_BASE,
      'base_path': self._server_instance.base_path,
      'extensions_samples_url': EXTENSIONS_SAMPLES,
      'static': self._server_instance.base_path + 'static',
    })
    render_context.update(additional_context or {})
    render_data = template.Render(render_context)
    return render_data.text, render_data.errors
Example #6
def UpdateCache(single_data_source=None, commit=None):
  '''Attempts to populate the datastore with a bunch of information derived from
  a given commit.
  '''
  server_instance = _CreateServerInstance(commit)

  # This is the guy that would be responsible for refreshing the cache of
  # examples. Here for posterity, hopefully it will be added to the targets
  # below someday.
  # render_refresher = RenderRefresher(server_instance, self._request)

  data_sources = CreateDataSources(server_instance)
  data_sources['content_providers'] = server_instance.content_providers
  data_sources['platform_bundle'] = server_instance.platform_bundle
  if single_data_source:
    _UpdateDataSource(single_data_source, data_sources[single_data_source])
  else:
    for name, source in data_sources.iteritems():
      _UpdateDataSource(name, source)
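A hypothetical invocation of UpdateCache(), based only on the signature and dictionary keys above; the commit id is a placeholder and 'content_providers' is one of the keys added in the code:

# Illustrative only: refresh a single named data source at a given commit...
UpdateCache(single_data_source='content_providers', commit='<commit-id>')
# ...or, with no arguments, refresh every data source plus the content
# providers and the platform bundle via _UpdateDataSource().
UpdateCache()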
Example #7
  def Render(self,
             template,
             request,
             data_sources=None,
             additional_context=None):
    '''Renders |template| using |request|.

    Specify |data_sources| to only include the DataSources with the given names
    when rendering the template.

    Specify |additional_context| to inject additional template context when
    rendering the template.
    '''
    assert isinstance(template, Motemplate), type(template)
    render_context = CreateDataSources(self._server_instance, request)
    if data_sources is not None:
      render_context = dict((name, d) for name, d in render_context.iteritems()
                            if name in data_sources)
    render_context.update({
      'apps_samples_url': GITHUB_BASE,
      'base_path': self._server_instance.base_path,
      'extensions_samples_url': EXTENSIONS_SAMPLES,
      'static': self._server_instance.base_path + 'static',
    })
    render_context.update(additional_context or {})
    render_data = template.Render(render_context)
    return render_data.text, render_data.errors
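A sketch of how a caller might use Render(), going only by the docstring above; renderer, the chosen data source name, and the extra context key are illustrative, not taken from the source:

# Illustrative only -- renderer is an instance of the class above and template
# is a compiled Motemplate; 'apis' mirrors a data source name seen elsewhere here.
text, errors = renderer.Render(
    template,
    request,
    data_sources=('apis',),                 # expose only the named DataSources
    additional_context={'extra_key': 'extra value'})  # merged on top of them
if errors:
  logging.error('Render() errors: %s', errors)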
Example #8
  def Render(self, template, request):
    assert isinstance(template, Handlebar), type(template)
    server_instance = self._server_instance
    render_context = {
      'api_list': server_instance.api_list_data_source_factory.Create(),
      'apis': server_instance.api_data_source_factory.Create(request),
      'apps_samples_url': GITHUB_BASE,
      'base_path': server_instance.base_path,
      'extensions_samples_url': EXTENSIONS_SAMPLES,
      'false': False,
      'intros': server_instance.intro_data_source_factory.Create(),
      'samples': server_instance.samples_data_source_factory.Create(request),
      'static': server_instance.base_path + 'static',
      'true': True,
    }
    render_context.update(CreateDataSources(server_instance, request=request))
    render_data = template.render(render_context)
    if render_data.errors:
      logging.error('Handlebar error(s) rendering %s:\n%s' %
          (template._name, '  \n'.join(render_data.errors)))
    return render_data.text
Example #9
    def _GetImpl(self):
        # Cron strategy:
        #
        # Find all public template files and static files, and render them. Most of
        # the time these won't have changed since the last cron run, so it's a
        # little wasteful, but hopefully rendering is really fast (if it isn't we
        # have a problem).
        _cronlog.info('starting')

        # This is returned every time RenderServlet wants to create a new
        # ServerInstance.
        #
        # TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
        # everything. Need retry logic at the fetcher level.
        server_instance = self._GetSafeServerInstance()
        trunk_fs = server_instance.host_file_system_provider.GetTrunk()

        def render(path):
            request = Request(path, self._request.host, self._request.headers)
            delegate = _SingletonRenderServletDelegate(server_instance)
            return RenderServlet(request, delegate).Get()

        def request_files_in_dir(path, prefix='', strip_ext=None):
            '''Requests every file found under |path| in this host file system,
            with a request prefix of |prefix|. |strip_ext| is an optional list
            of file extensions that should be stripped from paths before
            requesting.
            '''
            def maybe_strip_ext(name):
                if name == SITE_VERIFICATION_FILE or not strip_ext:
                    return name
                base, ext = posixpath.splitext(name)
                return base if ext in strip_ext else name

            files = [
                maybe_strip_ext(name)
                for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)
            ]
            return _RequestEachItem(path, files, render)

        results = []

        try:
            # Start running the hand-written Cron methods first; they can be run in
            # parallel. They are resolved at the end.
            def run_cron_for_future(target):
                title = target.__class__.__name__
                future, init_timer = TimerClosure(target.Cron)
                assert isinstance(
                    future,
                    Future), ('%s.Cron() did not return a Future' % title)

                def resolve():
                    resolve_timer = Timer()
                    try:
                        future.Get()
                    except Exception as e:
                        _cronlog.error('%s: error %s' %
                                       (title, traceback.format_exc()))
                        results.append(False)
                        if IsDeadlineExceededError(e): raise
                    finally:
                        resolve_timer.Stop()
                        _cronlog.info(
                            '%s took %s: %s to initialize and %s to resolve' %
                            (title,
                             init_timer.With(resolve_timer).FormatElapsed(),
                             init_timer.FormatElapsed(),
                             resolve_timer.FormatElapsed()))

                return Future(delegate=Gettable(resolve))

            targets = (CreateDataSources(server_instance).values() +
                       [server_instance.content_providers])
            title = 'initializing %s parallel Cron targets' % len(targets)
            _cronlog.info(title)
            timer = Timer()
            try:
                cron_futures = [
                    run_cron_for_future(target) for target in targets
                ]
            finally:
                _cronlog.info('%s took %s' %
                              (title, timer.Stop().FormatElapsed()))

            # Rendering the public templates will also pull in all of the private
            # templates.
            results.append(
                request_files_in_dir(PUBLIC_TEMPLATES,
                                     strip_ext=('.html', '.md')))

            # Rendering the public templates will have pulled in the .js and
            # manifest.json files (for listing examples on the API reference pages),
            # but there are still images, CSS, etc.
            results.append(request_files_in_dir(STATIC_DOCS, prefix='static'))

            # Samples are too expensive to run on the dev server, where there is no
            # parallel fetch.
            if not IsDevServer():
                # Fetch each individual sample file.
                results.append(
                    request_files_in_dir(EXAMPLES,
                                         prefix='extensions/examples'))

                # Fetch the zip file of each example (contains all the individual
                # files).
                example_zips = []
                for root, _, files in trunk_fs.Walk(EXAMPLES):
                    example_zips.extend(root + '.zip' for name in files
                                        if name == 'manifest.json')
                results.append(
                    _RequestEachItem(
                        'example zips', example_zips,
                        lambda path: render('extensions/examples/' + path)))

            # Resolve the hand-written Cron method futures.
            title = 'resolving %s parallel Cron targets' % len(targets)
            _cronlog.info(title)
            timer = Timer()
            try:
                for future in cron_futures:
                    future.Get()
            finally:
                _cronlog.info('%s took %s' %
                              (title, timer.Stop().FormatElapsed()))

        except:
            results.append(False)
            # This should never actually happen (each cron step does its own
            # conservative error checking), so re-raise no matter what it is.
            _cronlog.error('uncaught error: %s' % traceback.format_exc())
            raise
        finally:
            success = all(results)
            _cronlog.info('finished (%s)', 'success' if success else 'FAILED')
            return (Response.Ok('Success')
                    if success else Response.InternalError('Failure'))