Example 1
0
    def update_dependee_references(self):
        """Rewrite BUILD dependencies in every dependee of the moved target.

        Looks up all targets that depend on the original address and uses
        buildozer to replace references to the old address with the new one,
        trying both the fully-qualified spec form and the shorthand ':name'
        form for intra-BUILD references.
        """
        # Build a throwaway Target keyed on the old address so we can look up
        # its dependees in the dependency graph.  (The original code passed a
        # redundant **{} here; it expanded to nothing and has been removed.)
        dependee_targets = self.dependency_graph()[
            Target(name=self._from_address.target_name,
                   address=self._from_address,
                   build_graph=self.context.build_graph)]

        # Buildozer emits warnings for no-op replacements; silence them while
        # we run and restore normal logging afterwards.
        logging.disable(logging.WARNING)

        buildozer_binary = BuildozerBinary.scoped_instance(self)
        for concrete_target in dependee_targets:
            # A dependency may be spelled either as a full spec or as ':name';
            # attempt the replacement in both forms.
            for formats in [{
                    'from': self._from_address.spec,
                    'to': self._to_address.spec
            }, {
                    'from':
                    ':{}'.format(self._from_address.target_name),
                    'to':
                    ':{}'.format(self._to_address.target_name)
            }]:
                buildozer_binary.execute('replace dependencies {} {}'.format(
                    formats['from'], formats['to']),
                                         spec=concrete_target.address.spec,
                                         context=self.context)

        logging.disable(logging.NOTSET)
Example 2
0
 def test_validation(self):
     """JarLibrary's jars= argument must reject non-jar targets."""
     bad_dep = Target(name='mybird',
                      address=Address.parse('//:mybird'),
                      build_graph=self.build_graph)
     # A plain Target is not a valid jars= entry; construction must fail fast.
     with self.assertRaises(TargetDefinitionException):
         JarLibrary(name="test", jars=[bad_dep])
    def test_create_bad_targets(self):
        """BuildFileAliases must reject values that are not target types."""
        # An arbitrary object is not a target type.
        with self.assertRaises(TypeError):
            BuildFileAliases(targets={'fred': object()})

        # A target *instance* is also invalid -- aliases map names to types.
        instance = Target('fred', Address.parse('a:b'),
                          LegacyBuildGraph(None, None))
        with self.assertRaises(TypeError):
            BuildFileAliases(targets={'fred': instance})
Example 4
0
 def test_contains_address(self):
   """contains_address flips from False to True once a target is injected."""
   addr = Address.parse('a')
   # Nothing injected yet, so the address is unknown.
   self.assertFalse(self.build_graph.contains_address(addr))
   self.build_graph.inject_target(Target(name='a',
                                         address=addr,
                                         build_graph=self.build_graph))
   # After injection the same address is reported as present.
   self.assertTrue(self.build_graph.contains_address(addr))
Example 5
0
  def _alternate_target_roots(cls, options, address_mapper, build_graph):
    """Inject synthetic classpath targets for registered JVM tools.

    For each registered JVM tool whose dep spec does not resolve to a
    user-defined target, injects a synthetic target into ``build_graph``:
    a JarLibrary built from the tool's default classpath, or a bare Target
    when the default classpath is empty.  User-defined overrides are left
    untouched.

    :param options: the options instance tool specs are read from.
    :param address_mapper: unused here; part of the hook's signature.
    :param build_graph: the build graph to resolve against and inject into.
    :returns: always None -- no alternate roots are produced (see the note
      at the bottom of the method).
    :raises: ``cls.ToolResolveError`` when a non-default tool spec fails to
      resolve, or the error from ``cls._tool_resolve_error`` when a tool has
      no default classpath to fall back on.
    """
    processed = set()
    for jvm_tool in JvmToolMixin.get_registered_tools():
      dep_spec = jvm_tool.dep_spec(options)
      dep_address = Address.parse(dep_spec)
      # Some JVM tools are requested multiple times, we only need to handle them once.
      if dep_address not in processed:
        processed.add(dep_address)
        try:
          if build_graph.resolve_address(dep_address):
            # The user has defined a tool classpath override - we let that stand.
            continue
        except AddressLookupError as e:
          # The spec did not resolve: either fail loudly or fall back to the
          # tool's default classpath, depending on how the tool is configured.
          if jvm_tool.classpath is None:
            raise cls._tool_resolve_error(e, dep_spec, jvm_tool)
          else:
            if not jvm_tool.is_default(options):
              # The user specified a target spec for this jvm tool that doesn't actually exist.
              # We want to error out here instead of just silently using the default option while
              # appearing to respect their config.
              raise cls.ToolResolveError(dedent("""
                  Failed to resolve target for tool: {tool}. This target was obtained from
                  option {option} in scope {scope}.

                  Make sure you didn't make a typo in the tool's address. You specified that the
                  tool should use the target found at "{tool}".

                  This target has a default classpath configured, so you can simply remove:
                    [{scope}]
                    {option}: {tool}
                  from pants.ini (or any other config file) to use the default tool.

                  The default classpath is: {default_classpath}

                  Note that tool target addresses in pants.ini should be specified *without* quotes.
                """).strip().format(tool=dep_spec,
                                    option=jvm_tool.key,
                                    scope=jvm_tool.scope,
                                    default_classpath=':'.join(map(str, jvm_tool.classpath or ()))))
            # Only reached when the tool is still at its default spec: inject
            # a synthetic target carrying the default classpath.
            if jvm_tool.classpath:
              tool_classpath_target = JarLibrary(name=dep_address.target_name,
                                                 address=dep_address,
                                                 build_graph=build_graph,
                                                 jars=jvm_tool.classpath)
            else:
              # The tool classpath is empty by default, so we just inject a dummy target that
              # ivy resolves as the empty list classpath.  JarLibrary won't do since it requires
              # one or more jars, so we just pick a target type ivy has no resolve work to do for.
              tool_classpath_target = Target(name=dep_address.target_name,
                                             address=dep_address,
                                             build_graph=build_graph)
            build_graph.inject_target(tool_classpath_target, synthetic=True)

    # We use the trick of not returning alternate roots, but instead just filling the dep_spec
    # holes with a JarLibrary built from a tool's default classpath JarDependency list if there is
    # no over-riding targets present. This means we do modify the build_graph, but we at least do
    # it at a time in the engine lifecycle cut out for handling that.
    return None
Example 6
0
    def update_dependee_references(self):
        """Rewrite dependee BUILD references from the old address to the new one.

        Uses Buildozer to replace both the fully-qualified spec form and the
        shorthand ':name' form of the old address in every dependee target.
        """
        # Build a throwaway Target keyed on the old address to look up its
        # dependees.  (The redundant **{} the original passed here expanded to
        # nothing and has been removed.)
        # NOTE(review): build_graph=[] looks suspicious -- a real BuildGraph
        # instance is normally expected; confirm Target tolerates a list here.
        dependee_targets = self.dependency_graph()[Target(
            name=self._from_address.target_name,
            address=self._from_address,
            build_graph=[])]

        # Buildozer emits warnings for no-op replacements; silence them while
        # we run and restore normal logging afterwards.
        logging.disable(logging.WARNING)

        for concrete_target in dependee_targets:
            # A dependency may be spelled either as a full spec or as ':name';
            # attempt the replacement in both forms.
            for formats in [{
                    'from': self._from_address.spec,
                    'to': self._to_address.spec
            }, {
                    'from':
                    ':{}'.format(self._from_address.target_name),
                    'to':
                    ':{}'.format(self._to_address.target_name)
            }]:
                Buildozer.execute_binary('replace dependencies {} {}'.format(
                    formats['from'], formats['to']),
                                         spec=concrete_target.address.spec)

        logging.disable(logging.NOTSET)
Example 7
0
  def pre_compile_jobs(self, counter):
    """Return jobs to run once per compile run, before per-target compiles.

    Currently a single job, 'metacp(jdk)', that converts the JDK's platform
    jars to SemanticDB form with metacp, indexes the result with metai, and
    caches the resulting classpaths on ``self`` for later compile steps.

    :param counter: a job counter used only for progress-log formatting.
    :returns: a single-element list containing the metacp(jdk) Job, which has
      no dependencies.
    """

    # Create a target for the jdk outlining so that it'll only be done once per run.
    target = Target('jdk', Address('', 'jdk'), self.context.build_graph)
    index_dir = os.path.join(self.versioned_workdir, '--jdk--', 'index')

    def work_for_vts_rsc_jdk():
      # Locate the JDK's platform jars on the local system; these are the
      # inputs metacp converts to SemanticDB.
      distribution = self._get_jvm_distribution()
      jvm_lib_jars_abs = distribution.find_libs(['rt.jar', 'dt.jar', 'jce.jar', 'tools.jar'])
      # Cached for use by later compile steps in this run.
      self._jvm_lib_jars_abs = jvm_lib_jars_abs

      metacp_inputs = tuple(jvm_lib_jars_abs)

      # Progress-log prefix, e.g. '[  3/17] '.  The pad char must match the
      # str/bytes type counter.format_length() pads against on py2 vs py3.
      counter_val = str(counter()).rjust(counter.format_length(), ' ' if PY3 else b' ')
      counter_str = '[{}/{}] '.format(counter_val, counter.size)
      self.context.log.info(
        counter_str,
        'Metacp-ing ',
        items_to_report_element(metacp_inputs, 'jar'),
        ' in the jdk')

      # NB: Metacp doesn't handle the existence of possibly stale semanticdb jars,
      # so we explicitly clean the directory to keep it happy.
      safe_mkdir(index_dir, clean=True)

      with Timer() as timer:
        # Step 1: Convert classpath to SemanticDB
        # ---------------------------------------
        rsc_index_dir = fast_relpath(index_dir, get_buildroot())
        args = [
          '--verbose',
          # NB: The directory to dump the semanticdb jars generated by metacp.
          '--out', rsc_index_dir,
          os.pathsep.join(metacp_inputs),
        ]
        metacp_wu = self._runtool(
          'scala.meta.cli.Metacp',
          'metacp',
          args,
          distribution,
          tgt=target,
          input_files=tuple(
            # NB: no input files because the jdk is expected to exist on the system in a known
            #     location.
            #     Related: https://github.com/pantsbuild/pants/issues/6416
          ),
          output_dir=rsc_index_dir)
        # metacp reports its outputs as JSON on stdout.
        metacp_stdout = stdout_contents(metacp_wu)
        metacp_result = json.loads(metacp_stdout)

        metai_classpath = self._collect_metai_classpath(metacp_result, jvm_lib_jars_abs)

        # Step 1.5: metai Index the semanticdbs
        # -------------------------------------
        self._run_metai_tool(distribution, metai_classpath, rsc_index_dir, tgt=target)

        # Cached absolute-path classpath for use by later compile steps.
        self._jvm_lib_metacp_classpath = [os.path.join(get_buildroot(), x) for x in metai_classpath]

      # Record stats against the synthetic jdk target; len([]) == 0 is the
      # (empty) dependency count slot.
      self._record_target_stats(target,
        len(self._jvm_lib_metacp_classpath),
        len([]),
        timer.elapsed,
        False,
        'metacp'
      )

    # A single job with no dependencies ([]) so the scheduler can run it
    # before any per-target compile jobs that need the jdk index.
    return [
      Job(
        'metacp(jdk)',
        functools.partial(
          work_for_vts_rsc_jdk
        ),
        [],
        self._size_estimator([]),
      ),
    ]