Example #1
0
    def execute(self):
        """Run the scrooge linter over every invalid thrift target.

        Lint jobs are fanned out to a WorkerPool; failures are collected
        and reported together in a single TaskError at the end.
        """
        thrift_targets = self.get_targets(self._is_thrift)
        with self.invalidated(thrift_targets) as invalidation_check:
            invalid_vts = invalidation_check.invalid_vts
            if not invalid_vts:
                return

            with self.context.new_workunit(
                    'parallel-thrift-linter') as workunit:
                pool = WorkerPool(workunit.parent,
                                  self.context.run_tracker,
                                  self.get_options().worker_count)

                linter_classpath = self.tool_classpath('scrooge-linter')
                pending = [
                    (pool.submit_async_work(
                        Work(self._lint, [(vt.target, linter_classpath)])),
                     vt)
                    for vt in invalid_vts
                ]

                failures = []
                for result, vt in pending:
                    result.wait()
                    # MapResult.get re-raises the worker's exception when
                    # the run was not successful.
                    try:
                        result.get()
                    except ThriftLintError as e:
                        failures.append(str(e))
                    else:
                        vt.update()

                if failures:
                    raise TaskError('\n'.join(failures))
Example #2
0
  def execute(self):
    """Lint invalid thrift targets concurrently, honoring --skip."""
    if self.get_options().skip:
      return

    thrift_targets = self.context.targets(self._is_thrift)
    with self.invalidated(thrift_targets) as invalidation_check:
      invalid_vts = invalidation_check.invalid_vts
      if not invalid_vts:
        return

      with self.context.new_workunit('parallel-thrift-linter') as workunit:
        pool = WorkerPool(workunit.parent, self.context.run_tracker,
                          self.get_options().worker_count)

        linter_classpath = self.tool_classpath('scrooge-linter')
        submissions = []
        for vt in invalid_vts:
          lint_work = Work(self._lint, [(vt.target, linter_classpath)])
          submissions.append((pool.submit_async_work(lint_work), vt))

        failures = []
        for result, vt in submissions:
          result.wait()
          # MapResult.get re-raises the worker's exception on failure.
          try:
            result.get()
          except ThriftLintError as e:
            failures.append(str(e))
          else:
            vt.update()

        if failures:
          raise TaskError('\n'.join(failures))
Example #3
0
 def background_worker_pool(self):
     """Return the background WorkerPool, creating it on first access."""
     if self._background_worker_pool is None:
         # Lazy initialization: the pool is only built when first requested.
         self._background_worker_pool = WorkerPool(
             parent_workunit=self.get_background_root_workunit(),
             run_tracker=self,
             num_workers=self._num_background_workers)
     return self._background_worker_pool
    def prepare_compile(self, cache_manager, all_targets, relevant_targets):
        """Prepare classpaths, output dirs and the worker pool for compilation.

        Extends the base preparation by registering each relevant target's
        classes directory on the compile classpath, validating its analysis
        file, and bootstrapping the isolated-compile WorkerPool.
        """
        super(JvmCompileIsolatedStrategy,
              self).prepare_compile(cache_manager, all_targets,
                                    relevant_targets)

        # Each relevant target's classes directory is added to its classpath.
        compile_classpaths = self.context.products.get_data(
            'compile_classpath')

        with self.context.new_workunit('validate-{}-analysis'.format(
                self._compile_task_name)):
            for target in relevant_targets:
                compile_context = self.compile_context(target)
                safe_mkdir(compile_context.classes_dir)
                entries = [(conf, compile_context.classes_dir)
                           for conf in self._confs]
                compile_classpaths.add_for_target(target, entries)
                self.validate_analysis(compile_context.analysis_file)

        # Create the pool inside a workunit so its parent workunit is set.
        with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._compile_task_name)) \
                as workunit:
            # workunit.parent is used as the pool's parent so that child
            # workunits of different pools show up in order in the html
            # output, with the currently running one at the bottom.
            self._worker_pool = WorkerPool(workunit.parent,
                                           self.context.run_tracker,
                                           self._worker_count)
Example #5
0
  def do_compile(self, invalidation_check, compile_contexts):
    """Executes compilations for the invalid targets contained in a single chunk."""

    invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

    # This ensures the workunit for the worker pool is set before attempting to compile.
    with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
            as workunit:
      # Parent the pool on workunit.parent so that child workunits of
      # different pools render in order in the html output, keeping the
      # currently running workunit at the bottom of the page.
      worker_pool = WorkerPool(workunit.parent,
                               self.context.run_tracker,
                               self._worker_count)

    # Each invalid target needs a classes directory before compiling.
    for target in invalid_targets:
      runtime_cc = self.select_runtime_context(compile_contexts[target])
      safe_mkdir(runtime_cc.classes_dir)

    # One compile job per invalid target, using the classpath generated by
    # upstream JVM tasks and our own prepare_compile().
    compile_jobs = self._create_compile_jobs(compile_contexts,
                                             invalid_targets,
                                             invalidation_check.invalid_vts)

    graph = ExecutionGraph(compile_jobs,
                           self.get_options().print_exception_stacktrace)
    try:
      graph.execute(worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Example #6
0
    def prepare_execute(self, chunks):
        """Set up sources, runtime classpath entries and the worker pool.

        Flattens the chunks into one target list, maps targets to sources,
        registers classes directories on the runtime classpath, validates
        analysis files, and creates the compile WorkerPool.
        """
        relevant_targets = list(itertools.chain(*chunks))

        # Target -> sources (relative to buildroot).
        # TODO(benjy): Should sources_by_target be available in all Tasks?
        self._sources_by_target = self._compute_sources_by_target(
            relevant_targets)

        # The runtime classpath starts as a copy of the compile classpath.
        compile_classpath = self.context.products.get_data('compile_classpath')
        runtime_classpath = self.context.products.get_data(
            'runtime_classpath', compile_classpath.copy)

        with self.context.new_workunit('validate-{}-analysis'.format(
                self._name)):
            for target in relevant_targets:
                ctx = self.compile_context(target)
                safe_mkdir(ctx.classes_dir)
                runtime_classpath.add_for_target(
                    target,
                    [(conf, ctx.classes_dir) for conf in self._confs])
                self.validate_analysis(ctx.analysis_file)

        # Create the pool inside a workunit so its parent workunit is set.
        with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._name)) \
                as workunit:
            # workunit.parent keeps child workunits of different pools in
            # order in the html output, with the running one at the bottom.
            self._worker_pool = WorkerPool(workunit.parent,
                                           self.context.run_tracker,
                                           self._worker_count)
Example #7
0
 def foreground_worker_pool(self):
     """Return the foreground WorkerPool, creating it on first access."""
     if self._foreground_worker_pool is None:
         # Lazy initialization: the pool is only built when first requested.
         self._foreground_worker_pool = WorkerPool(
             parent_workunit=self._main_root_workunit,
             run_tracker=self,
             num_workers=self._num_foreground_workers)
     return self._foreground_worker_pool
Example #8
0
    def execute(self):
        """Lint invalid thrift targets in parallel.

        Resolves the effective worker count from either the task option or
        the ScroogeLinter subsystem (raising if both were set explicitly),
        fans lint runs out to a WorkerPool, and raises one TaskError that
        aggregates every lint failure.
        """
        thrift_targets = self.get_targets(self._is_thrift)

        task_opts = self.get_options()
        subsystem_opts = ScroogeLinter.global_instance().options
        task_configured = not task_opts.is_default("worker_count")
        subsystem_configured = not subsystem_opts.is_default("worker_count")
        if task_configured and subsystem_configured:
            # Both sources set explicitly is ambiguous; refuse to guess.
            self.raise_conflicting_option("worker_count")
        worker_count = (task_opts.worker_count if task_configured
                        else subsystem_opts.worker_count)

        with self.invalidated(thrift_targets) as invalidation_check:
            invalid_vts = invalidation_check.invalid_vts
            if not invalid_vts:
                return

            with self.context.new_workunit(
                    'parallel-thrift-linter') as workunit:
                pool = WorkerPool(workunit.parent,
                                  self.context.run_tracker,
                                  worker_count, workunit.name)

                linter_classpath = self.tool_classpath('scrooge-linter')
                pending = []
                for vt in invalid_vts:
                    lint_work = Work(self._lint,
                                     [(vt.target, linter_classpath)])
                    pending.append((pool.submit_async_work(lint_work), vt))

                failures = []
                for result, vt in pending:
                    result.wait()
                    # MapResult.get re-raises the worker's exception when
                    # the run was not successful.
                    try:
                        result.get()
                    except ThriftLintError as e:
                        failures.append(str(e))
                    else:
                        vt.update()

                if failures:
                    raise TaskError('\n'.join(failures))
Example #9
0
    def do_compile(self, invalidation_check, compile_contexts,
                   classpath_product):
        """Executes compilations for the invalid targets contained in a single chunk."""

        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        valid_targets = [
            vt.target for vt in invalidation_check.all_vts if vt.valid
        ]

        if self.execution_strategy == self.HERMETIC:
            # Hermetic execution needs directory digests for the classpath
            # directories of already-valid targets.
            self._set_directory_digests_for_valid_target_classpath_directories(
                valid_targets, compile_contexts)

        for target in valid_targets:
            runtime_cc = self.select_runtime_context(compile_contexts[target])
            entries = [(conf, self._classpath_for_context(runtime_cc))
                       for conf in self._confs]
            classpath_product.add_for_target(target, entries)
        self.register_extra_products_from_contexts(valid_targets,
                                                   compile_contexts)

        if not invalid_targets:
            return

        # This ensures the workunit for the worker pool is set before attempting to compile.
        with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
                as workunit:
            # Parent the pool on workunit.parent so child workunits of
            # different pools render in order in the html output, with the
            # currently running workunit at the bottom of the page.
            worker_pool = WorkerPool(workunit.parent, self.context.run_tracker,
                                     self._worker_count)

        # Each invalid target needs its output directory prepared.
        for target in invalid_targets:
            runtime_cc = self.select_runtime_context(compile_contexts[target])
            safe_mkdir(runtime_cc.classes_dir.path)

        # One compile job per invalid target, using the classpath generated
        # by upstream JVM tasks and our own prepare_compile().
        jobs = self._create_compile_jobs(compile_contexts, invalid_targets,
                                         invalidation_check.invalid_vts,
                                         classpath_product)

        graph = ExecutionGraph(
            jobs,
            self.get_options().print_exception_stacktrace)
        try:
            graph.execute(worker_pool, self.context.log)
        except ExecutionFailure as e:
            raise TaskError("Compilation failure: {}".format(e))
Example #10
0
 def test_keyboard_interrupts_propagated(self):
   """A KeyboardInterrupt raised on a worker thread reaches the caller."""
   cond = threading.Condition()
   cond.acquire()
   with self.assertRaises(KeyboardInterrupt):
     with temporary_dir() as rundir:
       workunit = WorkUnit(rundir, None, "work")
       worker_pool = WorkerPool(workunit, FakeRunTracker(), 1)
       try:
         # The worker raises KeyboardInterrupt; block (up to 2s) until it
         # propagates back to this thread.
         worker_pool.submit_async_work(Work(keyboard_interrupt_raiser, [()]))
         cond.wait(2)
       finally:
         worker_pool.abort()
Example #11
0
    def prepare_execute(self, chunks):
        """Compute per-target sources, seed the runtime classpath and
        bootstrap the compile worker pool."""
        relevant_targets = list(itertools.chain(*chunks))

        # Target -> sources (relative to buildroot).
        # TODO(benjy): Should sources_by_target be available in all Tasks?
        self._sources_by_target = self._compute_sources_by_target(
            relevant_targets)

        # Seed runtime_classpath from a copy of compile_classpath; get_data
        # is called for its product-registration side effect.
        compile_classpath = self.context.products.get_data('compile_classpath')
        self.context.products.get_data('runtime_classpath',
                                       compile_classpath.copy)

        # Create the pool inside a workunit so its parent workunit is set.
        with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._name)) \
                as workunit:
            # workunit.parent keeps child workunits of different pools in
            # order in the html output, with the running one at the bottom.
            self._worker_pool = WorkerPool(workunit.parent,
                                           self.context.run_tracker,
                                           self._worker_count)
Example #12
0
    def execute(self):
        """Compile all selected targets and register their classpath products.

        Flow: create empty product maps, select relevant targets, seed the
        runtime classpath from the compile classpath, bootstrap the worker
        pool, then inside an invalidation check register classpaths and
        products for valid targets, compile the invalid ones, and finally
        replace each classes-dir classpath entry with its jar'd form.
        """
        # In case we have no relevant targets and return early create the requested product maps.
        self._create_empty_products()

        def select_target(target):
            # Delegates to the task's select() predicate for target filtering.
            return self.select(target)

        relevant_targets = list(self.context.targets(predicate=select_target))

        if not relevant_targets:
            return

        # Clone the compile_classpath to the runtime_classpath.
        compile_classpath = self.context.products.get_data('compile_classpath')
        runtime_classpath = self.context.products.get_data(
            'runtime_classpath', compile_classpath.copy)

        # This ensures the workunit for the worker pool is set
        with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._name)) \
                as workunit:
            # This uses workunit.parent as the WorkerPool's parent so that child workunits
            # of different pools will show up in order in the html output. This way the current running
            # workunit is on the bottom of the page rather than possibly in the middle.
            self._worker_pool = WorkerPool(workunit.parent,
                                           self.context.run_tracker,
                                           self._worker_count)

        classpath_product = self.context.products.get_data('runtime_classpath')
        fingerprint_strategy = self._fingerprint_strategy(classpath_product)
        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(relevant_targets,
                              invalidate_dependents=True,
                              partition_size_hint=0,
                              fingerprint_strategy=fingerprint_strategy,
                              topological_order=True) as invalidation_check:

            # Initialize the classpath for all targets.
            compile_contexts = {
                vt.target: self._compile_context(vt.target, vt.results_dir)
                for vt in invalidation_check.all_vts
            }
            for cc in compile_contexts.values():
                classpath_product.add_for_target(cc.target,
                                                 [(conf, cc.classes_dir)
                                                  for conf in self._confs])

            # Register products for valid targets.
            valid_targets = [
                vt.target for vt in invalidation_check.all_vts if vt.valid
            ]
            self._register_vts([compile_contexts[t] for t in valid_targets])

            # Build any invalid targets (which will register products in the background).
            if invalidation_check.invalid_vts:
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]

                self.compile_chunk(
                    invalidation_check, compile_contexts, invalid_targets,
                    self.extra_compile_time_classpath_elements())

            # Once compilation has completed, replace the classpath entry for each target with
            # its jar'd representation.
            classpath_products = self.context.products.get_data(
                'runtime_classpath')
            for cc in compile_contexts.values():
                for conf in self._confs:
                    # Remove the classes-dir entry before adding the jar so
                    # downstream consumers see only the jar'd classpath.
                    classpath_products.remove_for_target(
                        cc.target, [(conf, cc.classes_dir)])
                    classpath_products.add_for_target(cc.target,
                                                      [(conf, cc.jar_file)])