Example #1
  def do_compile(self, invalidation_check, compile_contexts):
    """Executes compilations for the invalid targets contained in a single chunk."""

    invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

    # This ensures the workunit for the worker pool is set before attempting to compile.
    with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
            as workunit:
      # This uses workunit.parent as the WorkerPool's parent so that child workunits
      # of different pools will show up in order in the html output. This way the current running
      # workunit is on the bottom of the page rather than possibly in the middle.
      worker_pool = WorkerPool(workunit.parent,
                               self.context.run_tracker,
                               self._worker_count)

    # Prepare the output directory for each invalid target, and confirm that analysis is valid.
    for target in invalid_targets:
      cc = self.select_runtime_context(compile_contexts[target])
      safe_mkdir(cc.classes_dir)

    # Now create compile jobs for each invalid target one by one, using the classpath
    # generated by upstream JVM tasks and our own prepare_compile().
    jobs = self._create_compile_jobs(compile_contexts,
                                     invalid_targets,
                                     invalidation_check.invalid_vts)

    exec_graph = ExecutionGraph(jobs, self.get_options().print_exception_stacktrace)
    try:
      exec_graph.execute(worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Example #2
    def compile_chunk(self, invalidation_check, compile_contexts,
                      invalid_targets, extra_compile_time_classpath_elements):
        """Executes compilations for the invalid targets contained in a single chunk."""
        assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

        # Prepare the output directory for each invalid target, and confirm that analysis is valid.
        for target in invalid_targets:
            cc = compile_contexts[target]
            safe_mkdir(cc.classes_dir)
            self.validate_analysis(cc.analysis_file)

        # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
        classpath_products = self.context.products.get_data(
            'runtime_classpath')

        extra_compile_time_classpath = self._compute_extra_classpath(
            extra_compile_time_classpath_elements)

        # Now create compile jobs for each invalid target one by one.
        jobs = self._create_compile_jobs(
            classpath_products, compile_contexts, extra_compile_time_classpath,
            invalid_targets, invalidation_check.invalid_vts_partitioned)

        exec_graph = ExecutionGraph(jobs)
        try:
            exec_graph.execute(self._worker_pool, self.context.log)
        except ExecutionFailure as e:
            raise TaskError("Compilation failure: {}".format(e))
Example #3
 def construct_dependee_graph_str(self, jobs, task):
     exec_graph = ExecutionGraph(
         jobs,
         task.get_options().print_exception_stacktrace)
     dependee_graph = exec_graph.format_dependee_graph()
     print(dependee_graph)
     return dependee_graph
Example #4
  def compile_chunk(self,
                    invalidation_check,
                    compile_contexts,
                    invalid_targets,
                    extra_compile_time_classpath_elements):
    """Executes compilations for the invalid targets contained in a single chunk."""
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

    # Prepare the output directory for each invalid target, and confirm that analysis is valid.
    for target in invalid_targets:
      cc = compile_contexts[target]
      safe_mkdir(cc.classes_dir)
      self.validate_analysis(cc.analysis_file)

    # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
    classpath_products = self.context.products.get_data('runtime_classpath')

    extra_compile_time_classpath = self._compute_extra_classpath(
        extra_compile_time_classpath_elements)

    # Now create compile jobs for each invalid target one by one.
    jobs = self._create_compile_jobs(classpath_products,
                                     compile_contexts,
                                     extra_compile_time_classpath,
                                     invalid_targets,
                                     invalidation_check.invalid_vts)

    exec_graph = ExecutionGraph(jobs)
    try:
      exec_graph.execute(self._worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Example #5

    def compile_chunk(self, invalidation_check, all_targets, relevant_targets,
                      invalid_targets, extra_compile_time_classpath_elements,
                      compile_vts, register_vts,
                      update_artifact_cache_vts_work):
        """Executes compilations for the invalid targets contained in a single chunk."""
        assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."
        # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
        compile_classpaths = self.context.products.get_data(
            'compile_classpath')

        extra_compile_time_classpath = self._compute_extra_classpath(
            extra_compile_time_classpath_elements)

        compile_contexts = self._create_compile_contexts_for_targets(
            all_targets)

        # Now create compile jobs for each invalid target one by one.
        jobs = self._create_compile_jobs(
            compile_classpaths, compile_contexts, extra_compile_time_classpath,
            invalid_targets, invalidation_check.invalid_vts_partitioned,
            compile_vts, register_vts, update_artifact_cache_vts_work)

        exec_graph = ExecutionGraph(jobs)
        try:
            exec_graph.execute(self._worker_pool, self.context.log)
        except ExecutionFailure as e:
            raise TaskError("Compilation failure: {}".format(e))
Example #6

  def compile_chunk(self,
                    invalidation_check,
                    all_targets,
                    relevant_targets,
                    invalid_targets,
                    extra_compile_time_classpath_elements,
                    compile_vts,
                    register_vts,
                    update_artifact_cache_vts_work):
    """Executes compilations for the invalid targets contained in a single chunk."""
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."
    # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
    compile_classpaths = self.context.products.get_data('compile_classpath')

    extra_compile_time_classpath = self._compute_extra_classpath(
        extra_compile_time_classpath_elements)

    compile_contexts = self._create_compile_contexts_for_targets(all_targets)

    # Now create compile jobs for each invalid target one by one.
    jobs = self._create_compile_jobs(compile_classpaths,
                                     compile_contexts,
                                     extra_compile_time_classpath,
                                     invalid_targets,
                                     invalidation_check.invalid_vts_partitioned,
                                     compile_vts,
                                     register_vts,
                                     update_artifact_cache_vts_work)

    exec_graph = ExecutionGraph(jobs)
    try:
      exec_graph.execute(self._worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Example #7
  def do_compile(self, invalidation_check, compile_contexts):
    """Executes compilations for the invalid targets contained in a single chunk."""

    invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

    # This ensures the workunit for the worker pool is set before attempting to compile.
    with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
            as workunit:
      # This uses workunit.parent as the WorkerPool's parent so that child workunits
      # of different pools will show up in order in the html output. This way the current running
      # workunit is on the bottom of the page rather than possibly in the middle.
      worker_pool = WorkerPool(workunit.parent,
                               self.context.run_tracker,
                               self._worker_count)

    # Prepare the output directory for each invalid target, and confirm that analysis is valid.
    for target in invalid_targets:
      cc = self.select_runtime_context(compile_contexts[target])
      safe_mkdir(cc.classes_dir)

    # Now create compile jobs for each invalid target one by one, using the classpath
    # generated by upstream JVM tasks and our own prepare_compile().
    jobs = self._create_compile_jobs(compile_contexts,
                                     invalid_targets,
                                     invalidation_check.invalid_vts)

    exec_graph = ExecutionGraph(jobs)
    try:
      exec_graph.execute(worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Example #8
    def do_compile(self, invalidation_check, compile_contexts,
                   classpath_product):
        """Executes compilations for the invalid targets contained in a single chunk."""

        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        valid_targets = [
            vt.target for vt in invalidation_check.all_vts if vt.valid
        ]

        if self.execution_strategy == self.HERMETIC:
            self._set_directory_digests_for_valid_target_classpath_directories(
                valid_targets, compile_contexts)

        for valid_target in valid_targets:
            cc = self.select_runtime_context(compile_contexts[valid_target])

            classpath_product.add_for_target(
                valid_target,
                [(conf, self._classpath_for_context(cc))
                 for conf in self._confs],
            )
        self.register_extra_products_from_contexts(valid_targets,
                                                   compile_contexts)

        if not invalid_targets:
            return

        # This ensures the workunit for the worker pool is set before attempting to compile.
        with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
                as workunit:
            # This uses workunit.parent as the WorkerPool's parent so that child workunits
            # of different pools will show up in order in the html output. This way the current running
            # workunit is on the bottom of the page rather than possibly in the middle.
            worker_pool = WorkerPool(workunit.parent, self.context.run_tracker,
                                     self._worker_count)

        # Prepare the output directory for each invalid target, and confirm that analysis is valid.
        for target in invalid_targets:
            cc = self.select_runtime_context(compile_contexts[target])
            safe_mkdir(cc.classes_dir.path)

        # Now create compile jobs for each invalid target one by one, using the classpath
        # generated by upstream JVM tasks and our own prepare_compile().
        jobs = self._create_compile_jobs(compile_contexts, invalid_targets,
                                         invalidation_check.invalid_vts,
                                         classpath_product)

        exec_graph = ExecutionGraph(
            jobs,
            self.get_options().print_exception_stacktrace)
        try:
            exec_graph.execute(worker_pool, self.context.log)
        except ExecutionFailure as e:
            raise TaskError("Compilation failure: {}".format(e))
Example #9
 def test_dumps_stack_trace(self):
     graph = ExecutionGraph([self.job("A", raising_wrapper, [])], True)
     capturing_logger = CapturingLogger()
     with self.assertRaises(ExecutionFailure):
         graph.execute(ImmediatelyExecutingPool(), capturing_logger)
     error_logs = capturing_logger.log_entries["error"]
     self.assertEqual(2, len(error_logs), msg=f"Wanted two error logs, got: {error_logs}")
     regex = re.compile("A failed: I'm an error.*")
     self.assertRegex(error_logs[0], regex)
     regex = re.compile(
         'Traceback:.*in raising_wrapper.*raise Exception\\("I\'m an error.*"\\)', re.DOTALL,
     )
     self.assertRegex(error_logs[1], regex)
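When the graph is constructed with True as its second argument, a failing job produces two error-level entries: a one-line summary ("A failed: ...") followed by the formatted traceback. The CapturingLogger used here ships with the Pants test utilities; the class below is a hypothetical stand-in sketching the interface these assertions rely on, not the real implementation.

from collections import defaultdict

class CapturingLogger(object):
  """Hypothetical stand-in: records each logged message under its level."""

  def __init__(self):
    self.log_entries = defaultdict(list)

  def __getattr__(self, level):
    # Return a recorder for any level name: debug, info, warn, error, ...
    def record(msg, *args, **kwargs):
      self.log_entries[level].append(msg % args if args else msg)
    return record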
Example #10
 def test_dumps_stack_trace(self):
   graph = ExecutionGraph([self.job('A', raising_wrapper, [])], True)
   capturing_logger = CapturingLogger()
   with self.assertRaises(ExecutionFailure):
     graph.execute(ImmediatelyExecutingPool(), capturing_logger)
   error_logs = capturing_logger.log_entries['error']
   self.assertEqual(2, len(error_logs), msg='Wanted two error logs, got: {}'.format(error_logs))
   self.assertEqual("A failed: I'm an error", error_logs[0])
   regex = re.compile(
     "Traceback:.*in raising_wrapper.*raise Exception\\(\"I'm an error\"\\)",
     re.DOTALL,
   )
   self.assertRegex(error_logs[1], regex)
Example #11
 def test_dumps_stack_trace(self):
   graph = ExecutionGraph([self.job('A', raising_wrapper, [])], True)
   capturing_logger = CapturingLogger()
   with self.assertRaises(ExecutionFailure):
     graph.execute(ImmediatelyExecutingPool(), capturing_logger)
   error_logs = capturing_logger.log_entries['error']
   self.assertEqual(2, len(error_logs), msg='Wanted two error logs, got: {}'.format(error_logs))
   self.assertEqual("A failed: I'm an error", error_logs[0])
   regex = re.compile(
     "Traceback:.*in raising_wrapper.*raise Exception\\(\"I'm an error\"\\)",
     re.DOTALL,
   )
   self.assertRegex(error_logs[1], regex)
Example #12
  def do_compile(self, invalidation_check, compile_contexts, classpath_product):
    """Executes compilations for the invalid targets contained in a single chunk."""

    invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
    valid_targets = [vt.target for vt in invalidation_check.all_vts if vt.valid]

    if self.execution_strategy == self.HERMETIC:
      self._set_directory_digests_for_valid_target_classpath_directories(valid_targets, compile_contexts)

    for valid_target in valid_targets:
      cc = self.select_runtime_context(compile_contexts[valid_target])

      classpath_product.add_for_target(
        valid_target,
        [(conf, self._classpath_for_context(cc)) for conf in self._confs],
      )
    self.register_extra_products_from_contexts(valid_targets, compile_contexts)

    if not invalid_targets:
      return

    # This ensures the workunit for the worker pool is set before attempting to compile.
    with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
            as workunit:
      # This uses workunit.parent as the WorkerPool's parent so that child workunits
      # of different pools will show up in order in the html output. This way the current running
      # workunit is on the bottom of the page rather than possibly in the middle.
      worker_pool = WorkerPool(workunit.parent,
                               self.context.run_tracker,
                               self._worker_count)

    # Prepare the output directory for each invalid target, and confirm that analysis is valid.
    for target in invalid_targets:
      cc = self.select_runtime_context(compile_contexts[target])
      safe_mkdir(cc.classes_dir.path)

    # Now create compile jobs for each invalid target one by one, using the classpath
    # generated by upstream JVM tasks and our own prepare_compile().
    jobs = self._create_compile_jobs(compile_contexts,
                                     invalid_targets,
                                     invalidation_check.invalid_vts,
                                     classpath_product)

    exec_graph = ExecutionGraph(jobs, self.get_options().print_exception_stacktrace)
    try:
      exec_graph.execute(worker_pool, self.context.log)
    except ExecutionFailure as e:
      raise TaskError("Compilation failure: {}".format(e))
Example #13
  def test_simple_binary_tree(self):
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
                                 self.job("B", passing_fn, []),
                                 self.job("C", passing_fn, [])], False)
    self.execute(exec_graph)

    self.assertEqual(self.jobs_run, ["B", "C", "A"])
Example #14
 def test_priorities_for_mirrored_fork(self):
   exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 4),
                                self.job("B", passing_fn, ["A"], 1),
                                self.job("C", passing_fn, ["A"], 2)], False)
   self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 1, "C": 2})
   self.execute(exec_graph)
   self.assertEqual(self.jobs_run, ["A", "C", "B"])
Example #15
    def test_same_key_scheduled_twice_is_error(self):
        with self.assertRaises(JobExistsError) as cm:
            ExecutionGraph(
                [self.job("Same", passing_fn, []), self.job("Same", passing_fn, [])], False
            )

        self.assertEqual("Unexecutable graph: Job already scheduled 'Same'", str(cm.exception))
Example #16
    def test_single_dependency(self):
        exec_graph = ExecutionGraph(
            [self.job("A", passing_fn, ["B"]),
             self.job("B", passing_fn, [])], False)
        self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ["B", "A"])
Example #17
    def test_jobs_not_canceled_multiple_times(self):
        failures = list()

        def collect_failure(jobname):
            def fn():
                failures.append(jobname)

            return fn

        def my_job(name, result_fn, deps):
            return self.job(name,
                            result_fn,
                            deps,
                            1,
                            on_failure=collect_failure(name))

        exec_graph = ExecutionGraph(
            [
                my_job("A", raising_fn, []),
                my_job("B1", passing_fn, ["A"]),
                my_job("B2", passing_fn, ["A"]),
                my_job("C1", passing_fn, ["B1", "B2"]),
                my_job("C2", passing_fn, ["B1", "B2"]),
                my_job("E", passing_fn, ["C2"]),
            ],
            False,
        )

        with self.assertRaises(ExecutionFailure):
            self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ["A"])
        self.assertEqual(failures, ["A", "B1", "B2", "C1", "C2", "E"])
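The assertions above pin down the cancellation semantics: only A actually ran, yet on_failure fired exactly once for A and for each transitive dependent, including C1 and C2, which are reachable through both B1 and B2. A breadth-first walk of the dependee graph with a visited set yields exactly that once-only behavior; the sketch below illustrates the semantics with hypothetical names and is not the ExecutionGraph implementation.

from collections import deque

def jobs_to_cancel(failed_key, dependees):
  """dependees maps each job key to the keys that depend on it (hypothetical)."""
  canceled, order, queue = set(), [], deque([failed_key])
  while queue:
    key = queue.popleft()
    if key in canceled:
      continue  # already canceled via another path (C1 and C2 are reached twice)
    canceled.add(key)
    order.append(key)
    queue.extend(dependees.get(key, []))
  return order

# The graph from the test above: B1/B2 depend on A, C1/C2 on both Bs, E on C2.
dependees = {'A': ['B1', 'B2'], 'B1': ['C1', 'C2'], 'B2': ['C1', 'C2'], 'C2': ['E']}
assert jobs_to_cancel('A', dependees) == ['A', 'B1', 'B2', 'C1', 'C2', 'E']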
Example #18
  def test_non_existent_dependency_causes_failure(self):
    with self.assertRaises(UnknownJobError) as cm:
      ExecutionGraph([self.job("A", passing_fn, []),
                      self.job("B", passing_fn, ["Z"])], False)

    self.assertEqual("Unexecutable graph: Undefined dependencies 'Z'",
                     str(cm.exception))
Example #19
  def test_failure_of_disconnected_job_does_not_cancel_non_dependents(self):
    exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
                                 self.job("F", raising_fn, [])], False)
    with self.assertRaises(ExecutionFailure):
      self.execute(exec_graph)

    self.assertEqual(["A", "F"], self.jobs_run)
Example #20
    def test_on_failure_callback_raises_error(self):
        exec_graph = ExecutionGraph([self.job("A", raising_fn, [], on_failure=raising_fn)], False)

        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)

        self.assertEqual("Error in on_failure for A: I'm an error", str(cm.exception))
Example #21
    def test_jobs_not_canceled_multiple_times(self):
        failures = list()

        def collect_failure(jobname):
            def fn():
                failures.append(jobname)

            return fn

        def my_job(name, result_fn, deps):
            return self.job(name,
                            result_fn,
                            deps,
                            1,
                            on_failure=collect_failure(name))

        exec_graph = ExecutionGraph([
            my_job('A', raising_fn, []),
            my_job('B1', passing_fn, ['A']),
            my_job('B2', passing_fn, ['A']),
            my_job('C1', passing_fn, ['B1', 'B2']),
            my_job('C2', passing_fn, ['B1', 'B2']),
            my_job('E', passing_fn, ['C2'])
        ], False)

        with self.assertRaises(ExecutionFailure):
            self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ['A'])
        self.assertEqual(failures, ['A', 'B1', 'B2', 'C1', 'C2', 'E'])
Example #22
  def test_failure_of_dependency_does_not_run_dependents(self):
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["F"]),
                                 self.job("F", raising_fn, [])], False)
    with self.assertRaises(ExecutionFailure) as cm:
      self.execute(exec_graph)

    self.assertEqual(["F"], self.jobs_run)
    self.assertEqual("Failed jobs: F", str(cm.exception))
Example #23
 def test_priorities_for_mirrored_diamond(self):
   exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
                                self.job("B", passing_fn, ["A"], 2),
                                self.job("C", passing_fn, ["A"], 4),
                                self.job("D", passing_fn, ["B", "C"], 1)], False)
   self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 3, "C": 5, "D": 1})
   self.execute(exec_graph)
   self.assertEqual(self.jobs_run, ["A", "C", "B", "D"])
Example #24
    def test_base_exception_failure_raises_exception(self):
        # BaseException is raised for lower-level issues; failing to catch and
        # propagate it would make debugging difficult.
        exec_graph = ExecutionGraph([self.job("A", base_error_raising_fn, [])], False)
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)

        self.assertEqual("Failed jobs: A", str(cm.exception))
Example #25
    def test_simple_unconnected(self):
        exec_graph = ExecutionGraph(
            [self.job("A", passing_fn, []), self.job("B", passing_fn, [])], False
        )

        self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ["A", "B"])
Example #26
  def test_dependee_depends_on_dependency_of_its_dependency(self):
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
                                 self.job("B", passing_fn, ["C"]),
                                 self.job("C", passing_fn, []),
    ], False)

    self.execute(exec_graph)

    self.assertEqual(["C", "B", "A"], self.jobs_run)
Example #27
 def test_priorities_for_skewed_diamond(self):
   exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 1),
                                self.job("B", passing_fn, ["A"], 2),
                                self.job("C", passing_fn, ["B"], 4),
                                self.job("D", passing_fn, ["A"], 8),
                                self.job("E", passing_fn, ["C", "D"], 16)], False)
   self.assertEqual(exec_graph._job_priority, {"A": 25, "B": 22, "C": 20, "D": 24, "E": 16})
   self.execute(exec_graph)
   self.assertEqual(self.jobs_run, ["A", "D", "B", "C", "E"])
Example #28
  def test_cycle_in_graph_causes_failure(self):
    with self.assertRaises(NoRootJobError) as cm:
      ExecutionGraph([self.job("A", passing_fn, ["B"]),
                      self.job("B", passing_fn, ["A"])], False)

    self.assertEqual(
      "Unexecutable graph: All scheduled jobs have dependencies. "
      "There must be a circular dependency.",
      str(cm.exception))
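Rejecting this graph does not require a full cycle search: as the message says, it is enough that no scheduled job is dependency-free, because in a finite graph where every job waits on another job, some dependency chain must eventually revisit a node. A sketch of that root check, with hypothetical names:

def find_root_jobs(jobs):
  """jobs maps each job key to its list of dependency keys (hypothetical helper)."""
  roots = [key for key, deps in jobs.items() if not deps]
  if not roots:
    raise ValueError("Unexecutable graph: All scheduled jobs have dependencies. "
                     "There must be a circular dependency.")
  return roots

assert find_root_jobs({'A': ['B'], 'B': []}) == ['B']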
Example #29
    def test_simple_linear_dependencies(self):
        exec_graph = ExecutionGraph([
            self.job("A", passing_fn, ["B"]),
            self.job("B", passing_fn, ["C"]),
            self.job("C", passing_fn, [])
        ])

        self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ["C", "B", "A"])
Example #30
    def test_simple_unconnected_tree(self):
        exec_graph = ExecutionGraph([
            self.job("A", passing_fn, ["B"]),
            self.job("B", passing_fn, []),
            self.job("C", passing_fn, []),
        ])

        self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ["B", "C", "A"])
Example #31
  def test_failure_of_one_leg_of_tree_does_not_cancel_other(self):
    # TODO do we want this behavior, or do we want to fail fast on the first failed job?
    exec_graph = ExecutionGraph([self.job("B", passing_fn, []),
                                 self.job("F", raising_fn, ["B"]),
                                 self.job("A", passing_fn, ["B"])], False)
    with self.assertRaises(ExecutionFailure) as cm:
      self.execute(exec_graph)

    self.assertTrue(self.jobs_run == ["B", "F", "A"] or self.jobs_run == ["B", "A", "F"])
    self.assertEqual("Failed jobs: F", str(cm.exception))
Example #32
 def test_priorities_for_chain_of_jobs(self):
     exec_graph = ExecutionGraph(
         [
             self.job("A", passing_fn, [], 8),
             self.job("B", passing_fn, ["A"], 4),
             self.job("C", passing_fn, ["B"], 2),
             self.job("D", passing_fn, ["C"], 1),
         ],
         False,
     )
     self.assertEqual(exec_graph._job_priority, {"A": 15, "B": 7, "C": 3, "D": 1})
     self.execute(exec_graph)
     self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
Example #33
    def test_single_job(self):
        exec_graph = ExecutionGraph([self.job("A", passing_fn, [])])

        self.execute(exec_graph)

        self.assertEqual(self.jobs_run, ["A"])
Example #34
 def construct_dependee_graph_str(self, jobs, task):
   exec_graph = ExecutionGraph(jobs, task.get_options().print_exception_stacktrace)
   dependee_graph = exec_graph.format_dependee_graph()
   print(dependee_graph)
   return dependee_graph
Example #35
    def test_metacp_job_scheduled_for_jar_library(self):
        # Init dependencies for scala library targets.
        init_subsystem(
            ScalaPlatform, {
                ScalaPlatform.options_scope: {
                    'version': 'custom',
                    'suffix_version': '2.12',
                }
            })
        self.make_target(
            '//:scala-library',
            target_type=JarLibrary,
            jars=[JarDependency(org='com.example', name='scala', rev='0.0.0')])

        jar_target = self.make_target('java/classpath:jar_lib',
                                      target_type=JarLibrary,
                                      jars=[
                                          JarDependency(org='com.example',
                                                        name='example',
                                                        rev='0.0.0')
                                      ])

        java_target = self.make_target('java/classpath:java_lib',
                                       target_type=JavaLibrary,
                                       sources=['com/example/Foo.java'],
                                       dependencies=[jar_target])

        scala_target = self.make_target('java/classpath:scala_lib',
                                        target_type=ScalaLibrary,
                                        sources=['com/example/Foo.scala'],
                                        dependencies=[jar_target])

        context = self.context(target_roots=[jar_target])

        context.products.get_data(
            'compile_classpath',
            ClasspathProducts.init_func(self.pants_workdir))
        context.products.get_data(
            'runtime_classpath',
            ClasspathProducts.init_func(self.pants_workdir))

        task = self.create_task(context)
        # Tried to set this via options, but couldn't get the task to reconfigure.
        task._size_estimator = lambda srcs: 0
        with temporary_dir() as tmp_dir:
            compile_contexts = {
                target:
                task.create_compile_context(target,
                                            os.path.join(tmp_dir, target.id))
                for target in [jar_target, java_target, scala_target]
            }

            invalid_targets = [java_target, scala_target, jar_target]

            jobs = task._create_compile_jobs(
                compile_contexts,
                invalid_targets,
                invalid_vts=[LightWeightVTS(t) for t in invalid_targets],
                classpath_product=None)

            exec_graph = ExecutionGraph(
                jobs,
                task.get_options().print_exception_stacktrace)
            dependee_graph = exec_graph.format_dependee_graph()

            self.assertEqual(
                dedent("""
                     metacp(jdk) -> {
                       rsc(java/classpath:scala_lib),
                       compile_against_rsc(java/classpath:scala_lib),
                       metacp(java/classpath:jar_lib)
                     }
                     compile_against_rsc(java/classpath:java_lib) -> {}
                     rsc(java/classpath:scala_lib) -> {
                       compile_against_rsc(java/classpath:scala_lib)
                     }
                     compile_against_rsc(java/classpath:scala_lib) -> {}
                     metacp(java/classpath:jar_lib) -> {
                       rsc(java/classpath:scala_lib)
                     }""").strip(), dependee_graph)
Example #36
    def test_one_failure_raises_exception(self):
        exec_graph = ExecutionGraph([self.job("A", raising_fn, [])], False)
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)

        self.assertEqual("Failed jobs: A", str(cm.exception))