Example #1
def _RequestEachItem(title, items, request_callback):
    '''Runs a task |request_callback| named |title| for each item in |items|.
    |request_callback| must take an item and return a servlet response.
    Returns True if every item ran successfully, False if any returned a
    non-200 response or raised an exception.
    '''
    _log.info('%s: starting', title)
    success_count, failure_count = 0, 0
    timer = Timer()
    try:
        for i, item in enumerate(items):

            def error_message(detail):
                return '%s: error rendering %s (%s of %s): %s' % (
                    title, item, i + 1, len(items), detail)

            try:
                response = request_callback(item)
                if response.status == 200:
                    success_count += 1
                else:
                    _log.error(
                        error_message('response status %s' % response.status))
                    failure_count += 1
            except Exception as e:
                _log.error(error_message(traceback.format_exc()))
                failure_count += 1
                if IsDeadlineExceededError(e): raise
    finally:
        _log.info('%s: rendered %s of %s with %s failures in %s', title,
                  success_count, len(items), failure_count,
                  timer.Stop().FormatElapsed())
    return success_count == len(items)
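A minimal usage sketch of _RequestEachItem, assuming the module context of the example above (Timer, _log, etc.); the callback and response type here are illustrative stand-ins, not part of the original code:

import collections

FakeResponse = collections.namedtuple('FakeResponse', 'status')

def fetch_item(item):
    # Stand-in for a real servlet request; _RequestEachItem only needs the
    # returned object to expose a .status attribute.
    return FakeResponse(status=200)

all_ok = _RequestEachItem('nightly refresh', ['a', 'b', 'c'], fetch_item)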
Example #2
    def _GetImpl(self):
        # Cron strategy:
        #
        # Collect all DataSources, the PlatformBundle, the ContentProviders, and
        # any other statically rendered contents (e.g. examples content),
        # and spin up taskqueue tasks which will refresh any cached data relevant
        # to these assets.
        #
        # TODO(rockot/kalman): At the moment examples are not actually refreshed
        # because they're too slow.

        _log.info('starting')

        server_instance = self._GetSafeServerInstance()
        master_fs = server_instance.host_file_system_provider.GetMaster()
        master_commit = master_fs.GetCommitID().Get()

        # This is the component that would be responsible for refreshing the
        # cache of examples. Kept here for posterity; hopefully it will be
        # added to the targets below someday.
        render_refresher = RenderRefresher(server_instance, self._request)

        # Get the default taskqueue
        queue = taskqueue.Queue()

        # GAE documentation specifies that it's bad to add tasks to a queue
        # within one second of purging. We wait 2 seconds, because we like
        # to go the extra mile.
        queue.purge()
        time.sleep(2)

        success = True
        try:
            data_sources = CreateDataSources(server_instance)
            targets = (
                data_sources.items() +
                [('content_providers', server_instance.content_providers),
                 ('platform_bundle', server_instance.platform_bundle)])
            title = 'initializing %s parallel targets' % len(targets)
            _log.info(title)
            timer = Timer()
            for name, target in targets:
                refresh_paths = target.GetRefreshPaths()
                for path in refresh_paths:
                    queue.add(
                        taskqueue.Task(url='/_refresh/%s/%s' % (name, path),
                                       params={'commit': master_commit}))
            _log.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
        except:
            # This should never actually happen (each cron step does its own
            # conservative error checking), so re-raise no matter what it is.
            _log.error('uncaught error: %s' % traceback.format_exc())
            success = False
            raise
        finally:
            _log.info('finished (%s)', 'success' if success else 'FAILED')
            return (Response.Ok('Success')
                    if success else Response.InternalError('Failure'))
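For reference, each task enqueued above targets the refresh servlet shown in Example #3. A sketch of the URL round-trip, assuming the servlet framework strips the '/_refresh/' prefix before _GetImpl sees the path (names illustrative):

name, refresh_path = 'content_providers', 'some/feature'
task_url = '/_refresh/%s/%s' % (name, refresh_path)

# What the refresh servlet would then recover from the remaining path:
path = task_url[len('/_refresh/'):].strip('/')
source_name, source_path = path.split('/', 1)
assert (source_name, source_path) == ('content_providers', 'some/feature')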
Example #3
  def _GetImpl(self):
    path = self._request.path.strip('/')
    parts = path.split('/', 1)
    source_name = parts[0]
    if len(parts) == 2:
      source_path = parts[1]
    else:
      source_path = None

    _log.info('starting refresh of %s DataSource %s' %
        (source_name, '' if source_path is None else '[%s]' % source_path))

    if 'commit' in self._request.arguments:
      commit = self._request.arguments['commit']
    else:
      _log.warning('No commit given; refreshing from master. '
                   'This is probably NOT what you want.')
      commit = None

    server_instance = self._CreateServerInstance(commit)
    success = True
    try:
      if source_name == 'platform_bundle':
        data_source = server_instance.platform_bundle
      elif source_name == 'content_providers':
        data_source = server_instance.content_providers
      else:
        data_source = CreateDataSource(source_name, server_instance)

      class_name = data_source.__class__.__name__
      refresh_future = data_source.Refresh(source_path)
      assert isinstance(refresh_future, Future), (
          '%s.Refresh() did not return a Future' % class_name)
      timer = Timer()
      try:
        refresh_future.Get()
      except Exception as e:
        _log.error('%s: error %s' % (class_name, traceback.format_exc()))
        success = False
        if IsFileSystemThrottledError(e):
          return Response.ThrottledError('Throttled')
        raise
      finally:
        _log.info('Refreshing %s took %s' %
            (class_name, timer.Stop().FormatElapsed()))

    except:
      success = False
      # This should never actually happen.
      _log.error('uncaught error: %s' % traceback.format_exc())
      raise
    finally:
      _log.info('finished (%s)', 'success' if success else 'FAILED')
      return (Response.Ok('Success') if success else
              Response.InternalError('Failure'))
Example #4
def _UpdateDataSource(name, data_source):
  # Bind these before the try block so the finally clause can't hit a
  # NameError if Refresh() fails early.
  class_name = data_source.__class__.__name__
  timer = Timer()
  try:
    logging.info('Updating %s...' % name)
    data_source.Refresh().Get()
  except Exception:
    logging.error('%s: error %s' % (class_name, traceback.format_exc()))
    raise  # a bare raise preserves the original traceback
  finally:
    logging.info('Updating %s took %s' % (name, timer.Stop().FormatElapsed()))
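A minimal sketch of the contract _UpdateDataSource relies on, assuming the module context above; FakeDataSource and ImmediateFuture are illustrative stand-ins for the real Future-returning data sources:

class ImmediateFuture(object):
  # Minimal stand-in for the Future type used in these examples: Get()
  # returns the (already available) result.
  def __init__(self, value):
    self._value = value

  def Get(self):
    return self._value


class FakeDataSource(object):
  def Refresh(self):
    # Real data sources kick off asynchronous work and return a Future.
    return ImmediateFuture(None)


_UpdateDataSource('fake', FakeDataSource())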
Example #5
 def resolve():
     resolve_timer = Timer()
     try:
         future.Get()
     except Exception as e:
         _cronlog.error('%s: error %s' %
                        (title, traceback.format_exc()))
         results.append(False)
         if IsDeadlineExceededError(e): raise
     finally:
         resolve_timer.Stop()
         _cronlog.info(
             '%s took %s: %s to initialize and %s to resolve' %
             (title,
              init_timer.With(resolve_timer).FormatElapsed(),
              init_timer.FormatElapsed(),
              resolve_timer.FormatElapsed()))
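The Timer helper these snippets lean on is not shown anywhere. A rough sketch of the interface the call sites imply (Stop() returning self so it chains into FormatElapsed(), and With() summing two timers); this is inferred purely from usage, not the actual implementation, and the convex-hull examples further below use a different Timer variant that takes a start flag:

import time

class Timer(object):
    def __init__(self):
        self._start = time.time()
        self._elapsed = 0.0

    def Stop(self):
        self._elapsed = time.time() - self._start
        return self  # allows timer.Stop().FormatElapsed()

    def With(self, other):
        # init_timer.With(resolve_timer): a timer holding the summed time.
        combined = Timer()
        combined._elapsed = self._elapsed + other._elapsed
        return combined

    def FormatElapsed(self):
        return '%.3f seconds' % self._elapsed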
Example #6
def getPointsFromFile(path=PATH_POINTS_FILE):
    """Reads the points from a pre-formatted file.

    Parameters:
        path (String): Path to the file

    Returns:
        Array: Array of points
        Array: Point with the minimum X
        Array: Point with the minimum Y
        Array: Point with the maximum X
        Array: Point with the maximum Y

    """
    if (SHOW_DEBUG_TIMER): localTimer = Timer(True)
    points = []

    min_x = []
    max_x = []
    min_y = []
    max_y = []
    firstIteration = True

    with open(path, "r") as f:
        for line in f:
            point = [int(d) for d in re.findall(r'-?\d+', line)]

            # On the first iteration, every extreme is the first point
            if (firstIteration):
                firstIteration = False
                min_x = max_x = min_y = max_y = point
            # Otherwise, update whichever extremes this point beats
            else:
                if (point[0] < min_x[0]): min_x = point
                if (point[0] > max_x[0]): max_x = point
                if (point[1] < min_y[1]): min_y = point
                if (point[1] > max_y[1]): max_y = point

            points.append(point)

    if (SHOW_DEBUG_TIMER): print(">> Points read in:", localTimer.Stop())
    return points, min_x, min_y, max_x, max_y
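The file format getPointsFromFile accepts is loose: any line containing two integers works, since re.findall pulls out every (optionally signed) integer. An illustrative line:

import re

line = "(12, -7)\n"  # separators are irrelevant; only the integers matter
point = [int(d) for d in re.findall(r'-?\d+', line)]
assert point == [12, -7]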
Example #7
def _Main(argv):
  try:
    opts = dict((name[2:], value) for name, value in
                getopt.getopt(argv, '',
                              ['load-file=', 'data-source=', 'commit=',
                               'no-refresh', 'no-push', 'save-file=',
                               'no-master-update', 'push-all', 'force'])[0])
  except getopt.GetoptError as e:
    print '%s\n' % e
    print (
    'Usage: update_cache.py [options]\n\n'
    'Options:\n'
    '  --data-source=NAME        Limit update to a single data source.\n'
    '  --load-file=FILE          Load object store data from FILE before\n'
    '                            starting the update.\n'
    '  --save-file=FILE          Save object store data to FILE after running\n'
    '                            the update.\n'
    '  --no-refresh              Do not attempt to update any data sources.\n'
    '  --no-push                 Do not push to Datastore.\n'
    '  --commit=REV              Commit ID to use for master update.\n'
    '  --no-master-update        Do not update the master commit ID.\n'
    '  --push-all                Push all entities to the Datastore even if\n'
    '                            they do not differ from the loaded cache.\n\n'
    '  --force                   Force an update even if the latest commit is\n'
    '                            already cached.\n')
    exit(1)

  logging.getLogger().setLevel(logging.INFO)

  data_source = opts.get('data-source', None)
  load_file = opts.get('load-file', None)
  save_file = opts.get('save-file', None)
  do_refresh = 'no-refresh' not in opts
  do_push = 'no-push' not in opts
  do_master_update = 'no-master-update' not in opts
  push_all = do_push and ('push-all' in opts)
  commit = ParseRevision(opts.get('commit', 'origin/HEAD'))
  force_update = 'force' in opts

  original_data = {}
  if load_file:
    logging.info('Loading cache...')
    PersistentObjectStoreFake.LoadFromFile(load_file)
    if not push_all:
      original_data = copy.deepcopy(PersistentObjectStoreFake.DATA)

  last_commit = _GetCachedCommitId('master')
  if ParseRevision(commit) == last_commit and not force_update:
    logging.info('Latest cache (revision %s) is up to date. Bye.' % commit)
    exit(0)

  timer = Timer()
  if do_refresh:
    logging.info('Starting refresh from commit %s...' % ParseRevision(commit))
    if data_source:
      UpdateCache(single_data_source=data_source, commit=commit)
    else:
      UpdateCache(commit=commit)

  if do_push:
    from datastore_util import PushData
    if do_master_update:
      _UpdateCommitId('master', commit)
    push_timer = Timer()
    logging.info('Pushing data into datastore...')
    PushData(PersistentObjectStoreFake.DATA, original_data=original_data)
    logging.info('Done. Datastore push took %s' %
                 push_timer.Stop().FormatElapsed())
    _FlushMemcache()
  if save_file:
    PersistentObjectStoreFake.SaveToFile(save_file)

  logging.info('Update completed in %s' % timer.Stop().FormatElapsed())
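For reference, this is how getopt turns a flag list into the opts dict used above; the flag values here are illustrative:

import getopt

argv = ['--data-source=apis', '--no-push']
opts = dict((name[2:], value) for name, value in
            getopt.getopt(argv, '', ['data-source=', 'no-push'])[0])
assert opts == {'data-source': 'apis', 'no-push': ''}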
Example #8
    def _GetImpl(self):
        # Cron strategy:
        #
        # Find all public template files and static files, and render them. Most of
        # the time these won't have changed since the last cron run, so it's a
        # little wasteful, but hopefully rendering is really fast (if it isn't we
        # have a problem).
        _cronlog.info('starting')

        # This is returned every time RenderServlet wants to create a new
        # ServerInstance.
        #
        # TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
        # everything. Need retry logic at the fetcher level.
        server_instance = self._GetSafeServerInstance()
        trunk_fs = server_instance.host_file_system_provider.GetTrunk()

        def render(path):
            request = Request(path, self._request.host, self._request.headers)
            delegate = _SingletonRenderServletDelegate(server_instance)
            return RenderServlet(request, delegate).Get()

        def request_files_in_dir(path, prefix='', strip_ext=None):
            '''Requests every file found under |path| in this host file system,
            with a request prefix of |prefix|. |strip_ext| is an optional list
            of file extensions that should be stripped from paths before
            requesting.
            '''
            def maybe_strip_ext(name):
                if name == SITE_VERIFICATION_FILE or not strip_ext:
                    return name
                base, ext = posixpath.splitext(name)
                return base if ext in strip_ext else name

            files = [
                maybe_strip_ext(name)
                for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)
            ]
            return _RequestEachItem(path, files, render)

        results = []

        try:
            # Start running the hand-written Cron methods first; they can be run in
            # parallel. They are resolved at the end.
            def run_cron_for_future(target):
                title = target.__class__.__name__
                future, init_timer = TimerClosure(target.Cron)
                assert isinstance(future, Future), (
                    '%s.Cron() did not return a Future' % title)

                def resolve():
                    resolve_timer = Timer()
                    try:
                        future.Get()
                    except Exception as e:
                        _cronlog.error('%s: error %s' %
                                       (title, traceback.format_exc()))
                        results.append(False)
                        if IsDeadlineExceededError(e): raise
                    finally:
                        resolve_timer.Stop()
                        _cronlog.info(
                            '%s took %s: %s to initialize and %s to resolve' %
                            (title,
                             init_timer.With(resolve_timer).FormatElapsed(),
                             init_timer.FormatElapsed(),
                             resolve_timer.FormatElapsed()))

                return Future(delegate=Gettable(resolve))

            targets = (CreateDataSources(server_instance).values() +
                       [server_instance.content_providers])
            title = 'initializing %s parallel Cron targets' % len(targets)
            _cronlog.info(title)
            timer = Timer()
            try:
                cron_futures = [
                    run_cron_for_future(target) for target in targets
                ]
            finally:
                _cronlog.info('%s took %s' %
                              (title, timer.Stop().FormatElapsed()))

            # Rendering the public templates will also pull in all of the private
            # templates.
            results.append(
                request_files_in_dir(PUBLIC_TEMPLATES,
                                     strip_ext=('.html', '.md')))

            # Rendering the public templates will have pulled in the .js and
            # manifest.json files (for listing examples on the API reference pages),
            # but there are still images, CSS, etc.
            results.append(request_files_in_dir(STATIC_DOCS, prefix='static'))

            # Samples are too expensive to run on the dev server, where there is no
            # parallel fetch.
            if not IsDevServer():
                # Fetch each individual sample file.
                results.append(
                    request_files_in_dir(EXAMPLES,
                                         prefix='extensions/examples'))

                # Fetch the zip file of each example (contains all the individual
                # files).
                example_zips = []
                for root, _, files in trunk_fs.Walk(EXAMPLES):
                    example_zips.extend(root + '.zip' for name in files
                                        if name == 'manifest.json')
                results.append(
                    _RequestEachItem(
                        'example zips', example_zips,
                        lambda path: render('extensions/examples/' + path)))

            # Resolve the hand-written Cron method futures.
            title = 'resolving %s parallel Cron targets' % len(targets)
            _cronlog.info(title)
            timer = Timer()
            try:
                for future in cron_futures:
                    future.Get()
            finally:
                _cronlog.info('%s took %s' %
                              (title, timer.Stop().FormatElapsed()))

        except:
            results.append(False)
            # This should never actually happen (each cron step does its own
            # conservative error checking), so re-raise no matter what it is.
            _cronlog.error('uncaught error: %s' % traceback.format_exc())
            raise
        finally:
            success = all(results)
            _cronlog.info('finished (%s)', 'success' if success else 'FAILED')
            return (Response.Ok('Success')
                    if success else Response.InternalError('Failure'))
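Example #8 defers each resolve() with Future(delegate=Gettable(resolve)). A rough sketch of what those two classes would have to provide for this pattern to work; this is inferred from the call sites, not the real implementation:

class Gettable(object):
    # Adapts a plain callable to the delegate interface: Get() invokes it.
    def __init__(self, fn):
        self._fn = fn

    def Get(self):
        return self._fn()


class Future(object):
    def __init__(self, delegate=None):
        self._delegate = delegate

    def Get(self):
        # Resolving the future runs the deferred work at the moment the
        # caller asks for the result.
        return self._delegate.Get()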
Example #9
        else:
            canvas.DrawLine(lastPoint, p)

        lastPoint = p

    canvas.DrawLine(lastPoint, firstPoint)

    canvas.Show()


'''Main'''
# Load the points from a file
points, min_x, min_y, max_x, max_y = getPointsFromFile()

# Display the points
if (SHOW_CANVAS):
    ShowPoints(points)

# Convex hull algorithm
# convex = ConvexHull(DivideAndConquerOnHull=ShowPointsAtMerge)
convex = ConvexHull()

if (SHOW_DEBUG_TIMER): localTimer = Timer(True)
result = convex.DivideAndConquer(points)
if (SHOW_DEBUG_TIMER): print(">> Hull computed in:", localTimer.Stop())

if (SHOW_CANVAS):
    ShowPointsWithPolygon(points, result)

saveDiagonalInFile(points, result)
Example #10
    def _GetImpl(self):
        path = self._request.path.strip('/')
        parts = path.split('/', 1)
        source_name = parts[0]
        if len(parts) == 2:
            source_path = parts[1]
        else:
            source_path = None

        _log.info(
            'starting refresh of %s DataSource %s' %
            (source_name, '' if source_path is None else '[%s]' % source_path))

        if 'commit' in self._request.arguments:
            commit = self._request.arguments['commit']
        else:
            _log.warning('No commit given; refreshing from master. '
                         'This is probably NOT what you want.')
            commit = None

        server_instance = self._CreateServerInstance(commit)
        commit_tracker = CommitTracker(server_instance.object_store_creator)
        refresh_tracker = RefreshTracker(server_instance.object_store_creator)

        # If no commit was given, use the ID of the last cached master commit.
        # This allows sources external to the chromium repository to be updated
        # independently from individual refresh cycles.
        if commit is None:
            commit = commit_tracker.Get('master').Get()

        success = True
        try:
            if source_name == 'platform_bundle':
                data_source = server_instance.platform_bundle
            elif source_name == 'content_providers':
                data_source = server_instance.content_providers
            else:
                data_source = CreateDataSource(source_name, server_instance)

            class_name = data_source.__class__.__name__
            refresh_future = data_source.Refresh(source_path)
            assert isinstance(refresh_future, Future), (
                '%s.Refresh() did not return a Future' % class_name)
            timer = Timer()
            try:
                refresh_future.Get()

                # Mark this (commit, task) pair as completed and then see if this
                # concludes the full cache refresh. The list of tasks required to
                # complete a cache refresh is registered (and keyed on commit ID) by the
                # CronServlet before kicking off all the refresh tasks.
                (refresh_tracker.MarkTaskComplete(commit, path)
                 .Then(lambda _: refresh_tracker.GetRefreshComplete(commit))
                 .Then(lambda is_complete:
                           commit_tracker.Set('master', commit)
                           if is_complete else None)
                 .Get())
            except Exception as e:
                _log.error('%s: error %s' %
                           (class_name, traceback.format_exc()))
                success = False
                if IsFileSystemThrottledError(e):
                    return Response.ThrottledError('Throttled')
                raise
            finally:
                _log.info('Refreshing %s took %s' %
                          (class_name, timer.Stop().FormatElapsed()))

        except:
            success = False
            # This should never actually happen.
            _log.error('uncaught error: %s' % traceback.format_exc())
            raise
        finally:
            _log.info('finished (%s)', 'success' if success else 'FAILED')
            return (Response.Ok('Success')
                    if success else Response.InternalError('Failure'))
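The Then chain above is dense; an equivalent sequential sketch of the same logic, assuming Then simply chains one Future into the next:

# 1. Record that this (commit, task) pair has finished.
refresh_tracker.MarkTaskComplete(commit, path).Get()

# 2. Check whether every task registered for this commit is now done.
is_complete = refresh_tracker.GetRefreshComplete(commit).Get()

# 3. If so, advance the cached 'master' commit ID.
if is_complete:
    commit_tracker.Set('master', commit).Get()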
Example #11
def getRandomPoints(amount, rangeInX=(0, 1000), rangeInY=(0, 1000)):
    """Generates a list of random points.

    Parameters:
        amount (int): Number of points to generate
        rangeInX (tuple): Inclusive range for X values (defaults here are
            assumed; the original snippet's signature is not shown)
        rangeInY (tuple): Inclusive range for Y values (same assumption)

    Returns:
        Array: Array of points

    """
    points = []

    for _ in range(amount):
        point = [
            random.randint(rangeInX[0], rangeInX[1]),
            random.randint(rangeInY[0], rangeInY[1])
        ]

        points.append(point)

    return points


pointsSize = [100, 1000, 10000, 100000, 1000000]

for size in pointsSize:
    points = getRandomPoints(size)

    convex = ConvexHull()

    localTimer = Timer(True)
    convex.DivideAndConquer(points)
    print(">> Fecho Convexo de", size, "executado em:", localTimer.Stop())