Example 1
 def _owning_targets(self, file):
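   # Yields every target that owns `file`: a target qualifies if `file` is the
   # BUILD file itself, or if the target has sources and claims `file` as one.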
   for build_file in self._candidate_owners(file):
     is_build_file = (build_file.full_path == os.path.join(get_buildroot(), file))
     for address in Target.get_all_addresses(build_file):
       target = Target.get(address)
       if target and (is_build_file or (has_sources(target) and self._owns(target, file))):
         yield target
Example 2
  def execute(self, targets):
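    # Compiles all Java targets, partitioned for incremental efficiency, and
    # publishes classpath entries and class mappings for downstream tasks.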
    java_targets = [t for t in targets if has_sources(t, '.java')]
    if not java_targets:
      return

    # Get the exclusives group for the targets to compile.
    # Grouping guarantees that there will be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(java_targets[0])

    # Add classes and resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    with self.invalidated(java_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if not self.dry_run:
        for vts in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          sources_by_target = self._process_target_partition(vts, cp)

          # TODO: Check for missing dependencies.  See ScalaCompile for an example.
          # Will require figuring out what the actual deps of a class file are.

          vts.update()
          if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
            self._write_to_artifact_cache(vts, sources_by_target)

        # Provide the target->class and source->class mappings to downstream tasks if needed.
        if self.context.products.isrequired('classes'):
          if os.path.exists(self._depfile):
            sources_by_target = self._compute_sources_by_target(java_targets)
            deps = Dependencies(self._classes_dir)
            deps.load(self._depfile)
            self._add_all_products_to_genmap(sources_by_target, deps.classes_by_source)

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
Example 3
 def source_target(target):
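   # Selects in-scope (or, if transitive, any) targets that carry sources,
   # excluding codegen targets and any language the caller chose to skip.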
   return (self.transitive or target in self.targets) and has_sources(target) \
       and (not is_codegen(target)
            and not (self.skip_java and is_java(target))
            and not (self.skip_scala and is_scala(target)))
Example 4
 def source_target(target):
   return has_sources(target) \
       and (not target.is_codegen
            and not (self.skip_java and is_java(target))
            and not (self.skip_scala and is_scala(target)))
Example 5
def _has_sources(target, extension):
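  # True if the target owns files with `extension`, or if it is labeled as
  # having sources while actually declaring none.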
  return has_sources(target, extension) or (target.has_label('sources') and not target.sources)
Example 6
def is_jvm(target):
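  # A target is a JVM target if it is a Java target or carries Scala sources.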
  return is_java(target) or has_sources(target, '.scala')
Example 7
def is_idl(target):
  # TODO(Phil Hom): can be changed to is_codegen when previous hackweek thrift download hacks are
  # removed
  return is_exported(target) and has_sources(target, '.thrift')
Example 8
def is_scala(target):
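  # True if the target carries .scala sources or the internal _is_scala check matches.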
  return has_sources(target, '.scala') or _is_scala(target)
Example 9
 def source_target(target):
     return has_sources(target) and not target.is_codegen
Example 10
def _is_scala(target):
    return has_sources(target, ".scala")
Example 11
def is_scala(target):
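  # Scala libraries, tests, and compiler plugins count directly; JVM binaries,
  # JUnit tests, and benchmarks count only when they carry .scala sources.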
  return (isinstance(target, (ScalaLibrary, ScalaTests, ScalacPlugin))
          or (isinstance(target, (JvmBinary, junit_tests, Benchmark))
              and has_sources(target, '.scala')))
Example 12
def _has_sources(target, extension):
  return has_sources(target, extension) or (target.has_label('sources') and not target.sources)
Example 13
 def accept_target(target):
   return has_sources(target) and not target.is_codegen
Example 14
def is_idl(target):
    # TODO(Phil Hom): can be changed to is_codegen when previous hackweek thrift download hacks are
    # removed
    return is_exported(target) and has_sources(target, '.thrift')
Example 15
def is_jvm(target):
    return is_java(target) or has_sources(target, '.scala')
Example 16
def is_java(target):
    return has_sources(target, '.java')
Example 17
def is_scala(target):
  return (isinstance(target, (ScalaLibrary, ScalaTests, ScalacPlugin))
          or (isinstance(target, (JvmBinary, junit_tests, Benchmark))
              and has_sources(target, '.scala')))
Example 18
def is_java(target):
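  # Java libraries count directly; JVM binaries, JUnit tests, and benchmarks
  # count only when they carry .java sources.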
  return (isinstance(target, JavaLibrary)
          or (isinstance(target, (JvmBinary, junit_tests, Benchmark))
              and has_sources(target, '.java')))
Example 19
def is_java(target):
  return (isinstance(target, JavaLibrary)
          or (isinstance(target, (JvmBinary, junit_tests, Benchmark))
              and has_sources(target, '.java')))
Example 20
 def source_target(target):
     return has_sources(target) \
         and (not is_codegen(target)
              and not (self.skip_java and is_java(target))
              and not (self.skip_scala and is_scala(target)))
Example 21
 def source_target(target):
   return (self.transitive or target in self.targets) and has_sources(target) \
       and (not is_codegen(target)
            and not (self.skip_java and is_java(target))
            and not (self.skip_scala and is_scala(target)))
Example 22
  def execute(self, targets):
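    # Compiles all Scala targets, splitting and merging zinc analysis around each
    # partition so incremental state stays correct even if a partition fails.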
    scala_targets = [t for t in targets if has_sources(t, '.scala')]
    if not scala_targets:
      return

    # Get the exclusives group for the targets to compile.
    # Grouping guarantees that there will be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(scala_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    # Add (only to the local copy) classpath entries necessary for our compiler plugins.
    for conf in self._confs:
      for jar in self._zinc_utils.plugin_jars():
        cp.insert(0, (conf, jar))

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(scala_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._get_deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if ZincUtils.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            if self._zinc_utils.run_zinc_split(self._analysis_file,
                                               ((invalid_sources + deleted_sources, newly_invalid_analysis_tmp),
                                                ([], valid_analysis_tmp))):
              raise TaskError('Failed to split off invalid analysis.')
            if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
              if self._zinc_utils.run_zinc_merge([self._invalid_analysis_file, newly_invalid_analysis_tmp],
                                                 invalid_analysis_tmp):
                raise TaskError('Failed to merge prior and current invalid analysis.')
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            ZincUtils._move_analysis(valid_analysis_tmp, self._analysis_file)
            ZincUtils._move_analysis(invalid_analysis_tmp, self._invalid_analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources, analysis_file).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            if self._zinc_utils.run_zinc_split(self._invalid_analysis_file, splits):
              raise TaskError('Failed to split invalid analysis into per-partition files.')

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          self._process_target_partition(partition, cp)
          # No exception was thrown, so the compile succeeded and analysis_file is now valid.

          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Kick off the background artifact cache write.
            if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
              self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

            # Merge the newly-valid analysis into our global valid analysis.
            if ZincUtils.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                new_valid_analysis = analysis_file + '.valid.new'
                if self._zinc_utils.run_zinc_merge([self._analysis_file, analysis_file], new_valid_analysis):
                  raise TaskError('Failed to merge new analysis back into valid analysis file.')
              ZincUtils._move_analysis(new_valid_analysis, self._analysis_file)
            else:  # We need to keep analysis_file around. Background tasks may need it.
              ZincUtils._copy_analysis(analysis_file, self._analysis_file)

          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              if self._zinc_utils.run_zinc_split(self._invalid_analysis_file,
                  [(sources, discarded_invalid_analysis), ([], new_invalid_analysis)]):
                raise TaskError('Failed to trim invalid analysis file.')
              ZincUtils._move_analysis(new_invalid_analysis, self._invalid_analysis_file)

          # Now that all the analysis accounting is complete, we can safely mark the
          # targets as valid.
          vts.update()

        # Check for missing dependencies, if needed.
        if invalidation_check.invalid_vts and os.path.exists(self._analysis_file):
          deps_cache = JvmDependencyCache(self.context, scala_targets, self._analysis_file, self._classes_dir)
          deps_cache.check_undeclared_dependencies()

    # Provide the target->class and source->class mappings to downstream tasks if needed.
    if self.context.products.isrequired('classes'):
      sources_by_target = self._compute_sources_by_target(scala_targets)
      classes_by_source = self._compute_classes_by_source()
      self._add_all_products_to_genmap(sources_by_target, classes_by_source)

    # Update the classpath for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
Example 23
def _is_java(target):
  return has_sources(target, '.java')
Example 24
    def execute(self, targets):
        scala_targets = [t for t in targets if has_sources(t, '.scala')]
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Grouping guarantees that there will be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(scala_targets[0])

        # Add resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        cp = egroups.get_classpath_for_group(group_id)

        # Add (only to the local copy) classpath entries necessary for our compiler plugins.
        for conf in self._confs:
            for jar in self._zinc_utils.plugin_jars():
                cp.insert(0, (conf, jar))

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(scala_targets,
                              invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint
                              ) as invalidation_check:
            if invalidation_check.invalid_vts and not self.dry_run:
                invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
                # The analysis for invalid and deleted sources is no longer valid.
                invalid_sources_by_target = self._compute_sources_by_target(
                    invalid_targets)
                invalid_sources = list(
                    itertools.chain.from_iterable(
                        invalid_sources_by_target.values()))
                deleted_sources = self._get_deleted_sources()

                # Work in a tmpdir so we don't stomp the main analysis files on error.
                # The tmpdir is cleaned up in a shutdown hook, because background work
                # may need to access files we create here even after this method returns.
                self._ensure_analysis_tmpdir()
                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)
                valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
                newly_invalid_analysis_tmp = os.path.join(
                    tmpdir, 'newly_invalid_analysis')
                invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
                if ZincUtils.is_nonempty_analysis(self._analysis_file):
                    with self.context.new_workunit(name='prepare-analysis'):
                        if self._zinc_utils.run_zinc_split(
                                self._analysis_file,
                            ((invalid_sources + deleted_sources,
                              newly_invalid_analysis_tmp),
                             ([], valid_analysis_tmp))):
                            raise TaskError(
                                'Failed to split off invalid analysis.')
                        if ZincUtils.is_nonempty_analysis(
                                self._invalid_analysis_file):
                            if self._zinc_utils.run_zinc_merge([
                                    self._invalid_analysis_file,
                                    newly_invalid_analysis_tmp
                            ], invalid_analysis_tmp):
                                raise TaskError(
                                    'Failed to merge prior and current invalid analysis.'
                                )
                        else:
                            invalid_analysis_tmp = newly_invalid_analysis_tmp

                        # Now it's OK to overwrite the main analysis files with the new state.
                        ZincUtils._move_analysis(valid_analysis_tmp,
                                                 self._analysis_file)
                        ZincUtils._move_analysis(invalid_analysis_tmp,
                                                 self._invalid_analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                # Each element is a triple (vts, sources, analysis_file).
                partitions = []
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if ZincUtils.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        if self._zinc_utils.run_zinc_split(
                                self._invalid_analysis_file, splits):
                            raise TaskError(
                                'Failed to split invalid analysis into per-partition files.'
                            )

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    self._process_target_partition(partition, cp)
                    # No exception was thrown, so the compile succeeded and analysis_file is now valid.

                    if os.path.exists(analysis_file):  # The compilation created an analysis.
                        # Kick off the background artifact cache write.
                        if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                        # Merge the newly-valid analysis into our global valid analysis.
                        if ZincUtils.is_nonempty_analysis(self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                new_valid_analysis = analysis_file + '.valid.new'
                                if self._zinc_utils.run_zinc_merge(
                                    [self._analysis_file, analysis_file],
                                        new_valid_analysis):
                                    raise TaskError(
                                        'Failed to merge new analysis back into valid analysis file.'
                                    )
                            ZincUtils._move_analysis(new_valid_analysis,
                                                     self._analysis_file)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            ZincUtils._copy_analysis(analysis_file,
                                                     self._analysis_file)

                    if ZincUtils.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            if self._zinc_utils.run_zinc_split(
                                    self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis),
                                 ([], new_invalid_analysis)]):
                                raise TaskError(
                                    'Failed to trim invalid analysis file.')
                            ZincUtils._move_analysis(
                                new_invalid_analysis,
                                self._invalid_analysis_file)

                    # Now that all the analysis accounting is complete, we can safely mark the
                    # targets as valid.
                    vts.update()

                # Check for missing dependencies, if needed.
                if invalidation_check.invalid_vts and os.path.exists(
                        self._analysis_file):
                    deps_cache = JvmDependencyCache(self.context,
                                                    scala_targets,
                                                    self._analysis_file,
                                                    self._classes_dir)
                    deps_cache.check_undeclared_dependencies()

        # Provide the target->class and source->class mappings to downstream tasks if needed.
        if self.context.products.isrequired('classes'):
            sources_by_target = self._compute_sources_by_target(scala_targets)
            classes_by_source = self._compute_classes_by_source()
            self._add_all_products_to_genmap(sources_by_target,
                                             classes_by_source)

        # Update the classpath for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._classes_dir)])