Code example #1
File: scala_compile.py Project: lxwuchang/commons
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if scala_targets:
      safe_mkdir(self._depfile_dir)
      safe_mkdir(self._analysis_cache_dir)

      # Map from output directory to { analysis_cache_dir, [ analysis_cache_file ]}
      upstream_analysis_caches = self.context.products.get('upstream')

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          for jar in self._plugin_jars:
            cp.insert(0, (conf, jar))

      with self.invalidated(scala_targets, invalidate_dependants=True,
          partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.all_vts:
          if vt.valid:  # Don't compile, just post-process.
            self.post_process(vt, upstream_analysis_caches, split_artifact=False)
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          self.execute_single_compilation(vt, cp, upstream_analysis_caches)
          if not self.dry_run:
            vt.update()
      deps_cache = JvmDependencyCache(self, scala_targets)
      deps_cache.check_undeclared_dependencies()
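
Note: every execute() variant on this page follows the same overall shape: filter down to Scala targets, use the invalidated(...) context manager to split targets into already-valid and invalid sets, recompile only the invalid ones, and call update() on each versioned target once its artifacts exist. The toy sketch below illustrates that shape only; ToyVersionedTarget and toy_invalidated are hypothetical stand-ins, not the real pants Task API.

from contextlib import contextmanager

class ToyVersionedTarget(object):
  def __init__(self, name, valid):
    self.name = name
    self.valid = valid

  def update(self):
    # In pants, update() records the target's current fingerprint as built.
    self.valid = True

@contextmanager
def toy_invalidated(targets):
  # Yields the full list plus the stale subset, mirroring the shape of
  # invalidation_check.all_vts / invalid_vts in the examples above.
  yield targets, [t for t in targets if not t.valid]

targets = [ToyVersionedTarget('a', valid=True), ToyVersionedTarget('b', valid=False)]
with toy_invalidated(targets) as (all_vts, invalid_vts):
  for vt in invalid_vts:
    print('compiling %s' % vt.name)  # Only stale targets get recompiled.
    vt.update()
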
Code example #2
File: scala_compile.py Project: samitny/commons
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if not scala_targets:
      return

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    with self.context.state('classpath', []) as cp:
      self._add_globally_required_classpath_entries(cp)
      with self.context.state('upstream_analysis_map', {}) as upstream_analysis_map:
        with self.invalidated(scala_targets, invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint) as invalidation_check:
          # Process partitions one by one.
          for vts in invalidation_check.all_vts_partitioned:
            if not self.dry_run:
              merged_artifact = self._process_target_partition(vts, cp, upstream_analysis_map)
              vts.update()
              # Note that we add the merged classes_dir to the upstream.
              # This is because zinc doesn't handle many upstream dirs well.
              if os.path.exists(merged_artifact.classes_dir):
                for conf in self._confs:
                  cp.append((conf, merged_artifact.classes_dir))
                if os.path.exists(merged_artifact.analysis_file):
                  upstream_analysis_map[merged_artifact.classes_dir] = merged_artifact.analysis_file

    # Check for missing dependencies.
    all_analysis_files = set()
    for target in scala_targets:
      analysis_file = self._artifact_factory.analysis_file_for_targets([target])
      if os.path.exists(analysis_file):
        all_analysis_files.add(analysis_file)
    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
    deps_cache.check_undeclared_dependencies()
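
Note: the with self.context.state('classpath', []) blocks in examples #1 through #3 hand out a named, default-initialized mutable value that persists across tasks in the same run, which is how classpath entries added here become visible to downstream tasks. A minimal sketch of that idiom, using a hypothetical ToyContext rather than pants' real Context:

from contextlib import contextmanager

class ToyContext(object):
  def __init__(self):
    self._state = {}

  @contextmanager
  def state(self, key, default):
    # Create the value on first use, then keep handing out the same object.
    yield self._state.setdefault(key, default)

ctx = ToyContext()
with ctx.state('classpath', []) as cp:
  cp.append(('default', '/tmp/classes'))
with ctx.state('classpath', []) as cp:
  print(cp)  # [('default', '/tmp/classes')] -- mutations persist across uses.
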
Code example #3
File: scala_compile.py Project: jalons/commons
  def execute(self, targets):
    scala_targets = filter(_is_scala, targets)
    if not scala_targets:
      return

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    with self.context.state('classpath', []) as cp:
      self._add_globally_required_classpath_entries(cp)
      with self.context.state('upstream_analysis_map', {}) as upstream_analysis_map:
        with self.invalidated(scala_targets, invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint) as invalidation_check:
          # Process partitions one by one.
          for vts in invalidation_check.all_vts_partitioned:
            if not self.dry_run:
              merged_artifact = self._process_target_partition(vts, cp, upstream_analysis_map)
              vts.update()
              # Note that we add the merged classes_dir to the upstream.
              # This is because zinc doesn't handle many upstream dirs well.
              if os.path.exists(merged_artifact.classes_dir):
                for conf in self._confs:
                  cp.append((conf, merged_artifact.classes_dir))
                if os.path.exists(merged_artifact.analysis_file):
                  upstream_analysis_map[merged_artifact.classes_dir] = \
                    AnalysisFileSpec(merged_artifact.analysis_file, merged_artifact.classes_dir)

    # Check for missing dependencies.
    all_analysis_files = set()
    for target in scala_targets:
      analysis_file_spec = self._artifact_factory.analysis_file_for_targets([target])
      if os.path.exists(analysis_file_spec.analysis_file):
        all_analysis_files.add(analysis_file_spec)
    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
    deps_cache.check_undeclared_dependencies()
Code example #4
File: scala_compile.py Project: UrbanCompass/commons
    def setup_parser(cls, option_group, args, mkflag):
        NailgunTask.setup_parser(option_group, args, mkflag)

        option_group.add_option(
            mkflag("warnings"),
            mkflag("warnings", negate=True),
            dest="scala_compile_warnings",
            default=True,
            action="callback",
            callback=mkflag.set_bool,
            help="[%default] Compile scala code with all configured warnings " "enabled.",
        )

        option_group.add_option(
            mkflag("plugins"),
            dest="plugins",
            default=None,
            action="append",
            help="Use these scalac plugins. Default is set in pants.ini.",
        )

        option_group.add_option(
            mkflag("partition-size-hint"),
            dest="scala_compile_partition_size_hint",
            action="store",
            type="int",
            default=-1,
            help="Roughly how many source files to attempt to compile together. Set to a large number "
            "to compile all sources together. Set this to 0 to compile target-by-target. "
            "Default is set in pants.ini.",
        )

        JvmDependencyCache.setup_parser(option_group, args, mkflag)
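
Note: mkflag('warnings') and mkflag('warnings', negate=True) generate a flag and its negated spelling, and the set_bool callback stores True or False depending on which one was passed. The sketch below reproduces that behavior with plain optparse; the mkflag and set_bool definitions here are assumptions about the helper's shape, not pants' actual implementation.

import optparse

def mkflag(name, negate=False):
  # Assumed naming scheme: '--warnings' and '--no-warnings'.
  return '--no-%s' % name if negate else '--%s' % name

def set_bool(option, opt_str, value, parser):
  # True for the plain flag, False for the negated spelling.
  setattr(parser.values, option.dest, not opt_str.startswith('--no-'))

parser = optparse.OptionParser()
parser.add_option(mkflag('warnings'), mkflag('warnings', negate=True),
                  dest='scala_compile_warnings', default=True,
                  action='callback', callback=set_bool,
                  help='[%default] Compile scala code with warnings enabled.')
opts, _ = parser.parse_args(['--no-warnings'])
print(opts.scala_compile_warnings)  # False
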
Code example #5
File: scala_compile.py Project: wfarner/commons
    def setup_parser(cls, option_group, args, mkflag):
        NailgunTask.setup_parser(option_group, args, mkflag)

        option_group.add_option(
            mkflag('warnings'),
            mkflag('warnings', negate=True),
            dest='scala_compile_warnings',
            default=True,
            action='callback',
            callback=mkflag.set_bool,
            help='[%default] Compile scala code with all configured warnings '
            'enabled.')

        option_group.add_option(
            mkflag('plugins'),
            dest='plugins',
            default=None,
            action='append',
            help='Use these scalac plugins. Default is set in pants.ini.')

        option_group.add_option(mkflag('partition-size-hint'), dest='scala_compile_partition_size_hint',
          action='store', type='int', default=-1,
          help='Roughly how many source files to attempt to compile together. Set to a large number ' \
               'to compile all sources together. Set this to 0 to compile target-by-target. ' \
               'Default is set in pants.ini.')

        JvmDependencyCache.setup_parser(option_group, args, mkflag)
Code example #6
File: scala_compile.py Project: forestlzj/commons
  def __init__(self, context, workdir=None):
    NailgunTask.__init__(self, context, workdir=context.config.get('scala-compile', 'nailgun_dir'))

    color = context.options.scala_compile_color or \
            context.config.getbool('scala-compile', 'color', default=True)

    self._zinc_utils = ZincUtils(context=context, java_runner=self.runjava, color=color)

    self._partition_size_hint = \
      context.options.scala_compile_partition_size_hint \
      if context.options.scala_compile_partition_size_hint != -1 else \
      context.config.getint('scala-compile', 'partition_size_hint')

    self.check_missing_deps = context.options.scala_check_missing_deps
    self.check_intransitive_deps = context.options.scala_check_intransitive_deps
    self.check_unnecessary_deps = context.options.scala_check_unnecessary_deps
    if self.check_missing_deps:
      JvmDependencyCache.init_product_requirements(self)

    # For dependency analysis, we need to record the list of analysis cache files generated by splitting.
    self.generated_caches = set()

    workdir = context.config.get('scala-compile', 'workdir') if workdir is None else workdir
    self._classes_dir = os.path.join(workdir, 'classes')
    self._analysis_cache_dir = os.path.join(workdir, 'analysis_cache')
    self._resources_dir = os.path.join(workdir, 'resources')
    self._depfile_dir = os.path.join(workdir, 'depfiles')

    self._confs = context.config.getlist('scala-compile', 'confs')

    artifact_cache_spec = context.config.getlist('scala-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)
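
Note: the constructors in examples #6 and #7 apply one precedence rule over and over: an explicit command-line value wins, otherwise fall back to pants.ini, with -1 (or None) acting as the "not set" sentinel. A small illustrative sketch of that rule:

def resolve_option(cli_value, config_value, sentinel=-1):
  # Mirrors: options.x if options.x != -1 else config.getint(...).
  return cli_value if cli_value != sentinel else config_value

print(resolve_option(-1, 1000))   # 1000: flag not given, config value used.
print(resolve_option(200, 1000))  # 200: explicit flag wins.
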
Code example #7
File: scala_compile.py Project: samitny/commons
  def __init__(self, context, workdir=None):
    NailgunTask.__init__(self, context, workdir=context.config.get('scala-compile', 'nailgun_dir'))

    # Set up the zinc utils.
    color = context.options.scala_compile_color or \
            context.config.getbool('scala-compile', 'color', default=True)

    self._zinc_utils = ZincUtils(context=context, java_runner=self.runjava, color=color)

    # The rough number of source files to build in each compiler pass.
    self._partition_size_hint = \
      context.options.scala_compile_partition_size_hint \
      if context.options.scala_compile_partition_size_hint != -1 else \
      context.config.getint('scala-compile', 'partition_size_hint')

    # Set up dep checking if needed.
    if context.options.scala_check_missing_deps:
      JvmDependencyCache.init_product_requirements(self)

    # Various output directories.
    self._buildroot = get_buildroot()
    workdir = context.config.get('scala-compile', 'workdir') if workdir is None else workdir
    self._resources_dir = os.path.join(workdir, 'resources')
    self._artifact_factory = ZincArtifactFactory(workdir, self.context, self._zinc_utils)

    # The ivy confs for which we're building.
    self._confs = context.config.getlist('scala-compile', 'confs')

    # The artifact cache to read from/write to.
    artifact_cache_spec = context.config.getlist('scala-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)
Code example #8
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if not scala_targets:
      return

    safe_mkdir(self._classes_dir_base)
    safe_mkdir(self._depfiles_base)
    safe_mkdir(self._analysis_files_base)

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    with self.context.state('classpath', []) as cp:
      self._add_globally_required_classpath_entries(cp)

      with self.invalidated_with_artifact_cache_check(
          scala_targets,
          invalidate_dependents=True,
          partition_size_hint=self._partition_size_hint) as (invalidation_check, cached_vts):
        # Localize the analysis files we read from the artifact cache.
        self._localize_portable_artifact_files(cached_vts)
        # Compile partitions one by one.
        self._compile_all(invalidation_check.invalid_vts_partitioned, scala_targets, cp)

      # Post-processing we perform for all targets, whether they needed compilation or not.
      for target in scala_targets:
        self._post_process(target, cp)

    # Check for missing dependencies.
    all_analysis_files = set()
    for target in scala_targets:
      _, _, analysis_file = self._output_paths([target])
      if os.path.exists(analysis_file):
        all_analysis_files.add(analysis_file)
    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
    deps_cache.check_undeclared_dependencies()
Code example #9
  def __init__(self, context):
    NailgunTask.__init__(self, context, workdir=context.config.get('scala-compile', 'nailgun_dir'))

    # Set up the zinc utils.
    color = not context.options.no_color
    self._zinc_utils = ZincUtils(context=context, nailgun_task=self, color=color)

    # The rough number of source files to build in each compiler pass.
    self._partition_size_hint = (context.options.scala_compile_partition_size_hint
                                 if context.options.scala_compile_partition_size_hint != -1
                                 else context.config.getint('scala-compile', 'partition_size_hint',
                                                            default=1000))

    # Set up dep checking if needed.
    if context.options.scala_check_missing_deps:
      JvmDependencyCache.init_product_requirements(self)

    self._opts = context.config.getlist('scala-compile', 'args')
    if context.options.scala_compile_warnings:
      self._opts.extend(context.config.getlist('scala-compile', 'warning_args'))
    else:
      self._opts.extend(context.config.getlist('scala-compile', 'no_warning_args'))

    # Various output directories.
    workdir = context.config.get('scala-compile', 'workdir')
    self._classes_dir = os.path.join(workdir, 'classes')
    self._analysis_dir = os.path.join(workdir, 'analysis')

    safe_mkdir(self._classes_dir)
    safe_mkdir(self._analysis_dir)

    self._analysis_file = os.path.join(self._analysis_dir, 'global_analysis.valid')
    self._invalid_analysis_file = os.path.join(self._analysis_dir, 'global_analysis.invalid')
    self._resources_dir = os.path.join(workdir, 'resources')

    # The ivy confs for which we're building.
    self._confs = context.config.getlist('scala-compile', 'confs')

    self.context.products.require_data('exclusives_groups')

    self._local_artifact_cache_spec = \
      context.config.getlist('scala-compile', 'local_artifact_caches2', default=[])
    self._remote_artifact_cache_spec = \
      context.config.getlist('scala-compile', 'remote_artifact_caches2', default=[])

    # A temporary, but well-known, dir to munge analysis files in before caching. It must be
    # well-known so we know where to find the files when we retrieve them from the cache.
    self._analysis_tmpdir = os.path.join(self._analysis_dir, 'artifact_cache_tmpdir')

    # If we are compiling scala libraries with circular deps on java libraries we need to make sure
    # those cycle deps are present.
    self._inject_java_cycles()

    # Sources present in the last analysis that have since been deleted.
    # Generated lazily, so do not access directly. Call self._get_deleted_sources().
    self._deleted_sources = None
Code example #10
File: scala_compile.py Project: wfarner/commons
    def execute(self, targets):
        scala_targets = filter(_is_scala, targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Grouping guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        exclusives_key = egroups.get_group_key_for_target(targets[0])
        exclusives_classpath = egroups.get_classpath_for_group(exclusives_key)

        with self.context.state('upstream_analysis_map',
                                {}) as upstream_analysis_map:
            with self.invalidated(scala_targets,
                                  invalidate_dependents=True,
                                  partition_size_hint=self._partition_size_hint
                                  ) as invalidation_check:
                # Process partitions one by one.
                for vts in invalidation_check.all_vts_partitioned:
                    # Refresh the classpath, to pick up any changes from update_compatible_classpaths.
                    exclusives_classpath = egroups.get_classpath_for_group(
                        exclusives_key)
                    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
                    # Add the global classpaths here, directly, instead of doing the
                    # add-to-compatible thing.
                    self._add_globally_required_classpath_entries(
                        exclusives_classpath)

                    if not self.dry_run:
                        merged_artifact = self._process_target_partition(
                            vts, exclusives_classpath, upstream_analysis_map)
                        vts.update()
                        # Note that we add the merged classes_dir to the upstream.
                        # This is because zinc doesn't handle many upstream dirs well.
                        if os.path.exists(merged_artifact.classes_dir):
                            for conf in self._confs:  ### CLASSPATH UPDATE
                                # Update the exclusives group classpaths.
                                egroups.update_compatible_classpaths(
                                    exclusives_key,
                                    [(conf, merged_artifact.classes_dir)])
                            if os.path.exists(merged_artifact.analysis_file):
                                upstream_analysis_map[merged_artifact.classes_dir] = \
                                  AnalysisFileSpec(merged_artifact.analysis_file, merged_artifact.classes_dir)
                if invalidation_check.invalid_vts:
                    # Check for missing dependencies.
                    all_analysis_files = set()
                    for target in scala_targets:
                        analysis_file_spec = self._artifact_factory.analysis_file_for_targets(
                            [target])
                        if os.path.exists(analysis_file_spec.analysis_file):
                            all_analysis_files.add(analysis_file_spec)
                    deps_cache = JvmDependencyCache(self.context,
                                                    scala_targets,
                                                    all_analysis_files)
                    deps_cache.check_undeclared_dependencies()
Code example #11
File: scala_compile.py Project: UrbanCompass/commons
    def execute(self, targets):
        scala_targets = filter(_is_scala, targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Grouping guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data("exclusives_groups")
        exclusives_key = egroups.get_group_key_for_target(targets[0])
        exclusives_classpath = egroups.get_classpath_for_group(exclusives_key)

        with self.context.state("upstream_analysis_map", {}) as upstream_analysis_map:
            with self.invalidated(
                scala_targets, invalidate_dependents=True, partition_size_hint=self._partition_size_hint
            ) as invalidation_check:
                # Process partitions one by one.
                for vts in invalidation_check.all_vts_partitioned:
                    # Refresh the classpath, to pick up any changes from update_compatible_classpaths.
                    exclusives_classpath = egroups.get_classpath_for_group(exclusives_key)
                    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
                    # Add the global classpaths here, directly, instead of doing the
                    # add-to-compatible thing.
                    self._add_globally_required_classpath_entries(exclusives_classpath)

                    if not self.dry_run:
                        merged_artifact = self._process_target_partition(
                            vts, exclusives_classpath, upstream_analysis_map
                        )
                        vts.update()
                        # Note that we add the merged classes_dir to the upstream.
                        # This is because zinc doesn't handle many upstream dirs well.
                        if os.path.exists(merged_artifact.classes_dir):
                            for conf in self._confs:  ### CLASSPATH UPDATE
                                # Update the exclusives group classpaths.
                                egroups.update_compatible_classpaths(
                                    exclusives_key, [(conf, merged_artifact.classes_dir)]
                                )
                            if os.path.exists(merged_artifact.analysis_file):
                                upstream_analysis_map[merged_artifact.classes_dir] = AnalysisFileSpec(
                                    merged_artifact.analysis_file, merged_artifact.classes_dir
                                )
                if invalidation_check.invalid_vts:
                    # Check for missing dependencies.
                    all_analysis_files = set()
                    for target in scala_targets:
                        analysis_file_spec = self._artifact_factory.analysis_file_for_targets([target])
                        if os.path.exists(analysis_file_spec.analysis_file):
                            all_analysis_files.add(analysis_file_spec)
                    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
                    deps_cache.check_undeclared_dependencies()
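
Note: examples #10 and #11 route all classpath changes through an exclusives-groups product: each target maps to a group key, and every compatible task reads and extends the classpath owned by that key. The sketch below models that bookkeeping with a hypothetical ToyExclusivesGroups; the real product derives keys from targets' exclusive tags, which is elided here.

class ToyExclusivesGroups(object):
  def __init__(self):
    self._classpaths = {}

  def get_group_key_for_target(self, target):
    # Stubbed: the real implementation computes this from exclusives tags.
    return 'group-0'

  def get_classpath_for_group(self, key):
    return self._classpaths.setdefault(key, [])

  def update_compatible_classpaths(self, key, entries):
    # Appends (conf, path) pairs to this group's classpath
    # (compatible-group propagation elided).
    self._classpaths.setdefault(key, []).extend(entries)

egroups = ToyExclusivesGroups()
key = egroups.get_group_key_for_target('scala_lib')
egroups.update_compatible_classpaths(key, [('default', 'dist/classes')])
print(egroups.get_classpath_for_group(key))  # [('default', 'dist/classes')]
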
Code example #12
File: scala_compile.py Project: wfarner/commons
    def __init__(self, context):
        NailgunTask.__init__(self,
                             context,
                             workdir=context.config.get(
                                 'scala-compile', 'nailgun_dir'))

        # Set up the zinc utils.
        color = not context.options.no_color
        self._zinc_utils = ZincUtils(context=context,
                                     nailgun_task=self,
                                     color=color)

        # The rough number of source files to build in each compiler pass.
        self._partition_size_hint = (
            context.options.scala_compile_partition_size_hint
            if context.options.scala_compile_partition_size_hint != -1 else
            context.config.getint(
                'scala-compile', 'partition_size_hint', default=1000))

        # Set up dep checking if needed.
        if context.options.scala_check_missing_deps:
            JvmDependencyCache.init_product_requirements(self)

        self._opts = context.config.getlist('scala-compile', 'args')
        if context.options.scala_compile_warnings:
            self._opts.extend(
                context.config.getlist('scala-compile', 'warning_args'))
        else:
            self._opts.extend(
                context.config.getlist('scala-compile', 'no_warning_args'))

        # Various output directories.
        workdir = context.config.get('scala-compile', 'workdir')
        self._resources_dir = os.path.join(workdir, 'resources')
        self._artifact_factory = ZincArtifactFactory(workdir, self.context,
                                                     self._zinc_utils)

        # The ivy confs for which we're building.
        self._confs = context.config.getlist('scala-compile', 'confs')

        # The artifact cache to read from/write to.
        artifact_cache_spec = context.config.getlist('scala-compile',
                                                     'artifact_caches',
                                                     default=[])
        self.setup_artifact_cache(artifact_cache_spec)

        # If we are compiling scala libraries with circular deps on java libraries we need to make sure
        # those cycle deps are present.
        self._inject_java_cycles()
Code example #13
File: scala_compile.py Project: steliokontos/commons
    def __init__(self, context):
        NailgunTask.__init__(self, context, workdir=context.config.get("scala-compile", "nailgun_dir"))

        # Set up the zinc utils.
        # Command line switch overrides color setting set in pants.ini
        color = (
            context.options.scala_compile_color
            if context.options.scala_compile_color is not None
            else context.config.getbool("scala-compile", "color", default=True)
        )

        self._zinc_utils = ZincUtils(context=context, java_runner=self.runjava, color=color)

        # The rough number of source files to build in each compiler pass.
        self._partition_size_hint = (
            context.options.scala_compile_partition_size_hint
            if context.options.scala_compile_partition_size_hint != -1
            else context.config.getint("scala-compile", "partition_size_hint", default=1000)
        )

        # Set up dep checking if needed.
        if context.options.scala_check_missing_deps:
            JvmDependencyCache.init_product_requirements(self)

        self._opts = context.config.getlist("scala-compile", "args")
        if context.options.scala_compile_warnings:
            self._opts.extend(context.config.getlist("scala-compile", "warning_args"))
        else:
            self._opts.extend(context.config.getlist("scala-compile", "no_warning_args"))

        # Various output directories.
        workdir = context.config.get("scala-compile", "workdir")
        self._resources_dir = os.path.join(workdir, "resources")
        self._artifact_factory = ZincArtifactFactory(workdir, self.context, self._zinc_utils)

        # The ivy confs for which we're building.
        self._confs = context.config.getlist("scala-compile", "confs")

        # The artifact cache to read from/write to.
        artifact_cache_spec = context.config.getlist("scala-compile", "artifact_caches")
        self.setup_artifact_cache(artifact_cache_spec)

        # If we are compiling scala libraries with circular deps on java libraries we need to make sure
        # those cycle deps are present.
        self._inject_java_cycles()
Code example #14
File: scala_compile.py Project: benhuang-zh/commons
  def setup_parser(cls, option_group, args, mkflag):
    NailgunTask.setup_parser(option_group, args, mkflag)

    option_group.add_option(mkflag('warnings'), mkflag('warnings', negate=True),
                            dest='scala_compile_warnings', default=True,
                            action='callback', callback=mkflag.set_bool,
                            help='[%default] Compile scala code with all configured warnings '
                                 'enabled.')

    option_group.add_option(mkflag('plugins'), dest='plugins', default=None,
      action='append', help='Use these scalac plugins. Default is set in pants.ini.')

    option_group.add_option(mkflag('partition-size-hint'), dest='scala_compile_partition_size_hint',
      action='store', type='int', default=-1,
      help='Roughly how many source files to attempt to compile together. Set to a large number ' \
           'to compile all sources together. Set this to 0 to compile target-by-target. ' \
           'Default is set in pants.ini.')

    JvmDependencyCache.setup_parser(option_group, args, mkflag)
Code example #15
    def __init__(self, context, workdir=None):
        NailgunTask.__init__(self,
                             context,
                             workdir=context.config.get(
                                 'scala-compile', 'nailgun_dir'))

        # Set up the zinc utils.
        # Command line switch overrides color setting set in pants.ini
        color = context.options.scala_compile_color if context.options.scala_compile_color is not None else \
                context.config.getbool('scala-compile', 'color', default=True)

        self._zinc_utils = ZincUtils(context=context,
                                     java_runner=self.runjava,
                                     color=color)

        # The rough number of source files to build in each compiler pass.
        self._partition_size_hint = \
          context.options.scala_compile_partition_size_hint \
          if context.options.scala_compile_partition_size_hint != -1 else \
          context.config.getint('scala-compile', 'partition_size_hint')

        # Set up dep checking if needed.
        if context.options.scala_check_missing_deps:
            JvmDependencyCache.init_product_requirements(self)

        # Various output directories.
        self._buildroot = get_buildroot()
        workdir = context.config.get('scala-compile',
                                     'workdir') if workdir is None else workdir
        self._resources_dir = os.path.join(workdir, 'resources')
        self._artifact_factory = ZincArtifactFactory(workdir, self.context,
                                                     self._zinc_utils)

        # The ivy confs for which we're building.
        self._confs = context.config.getlist('scala-compile', 'confs')

        # The artifact cache to read from/write to.
        artifact_cache_spec = context.config.getlist('scala-compile',
                                                     'artifact_caches')
        self.setup_artifact_cache(artifact_cache_spec)
Code example #16
File: scala_compile.py Project: benhuang-zh/commons
  def __init__(self, context):
    NailgunTask.__init__(self, context, workdir=context.config.get('scala-compile', 'nailgun_dir'))

    # Set up the zinc utils.
    color = not context.options.no_color
    self._zinc_utils = ZincUtils(context=context, nailgun_task=self, color=color)

    # The rough number of source files to build in each compiler pass.
    self._partition_size_hint = (context.options.scala_compile_partition_size_hint
                                 if context.options.scala_compile_partition_size_hint != -1
                                 else context.config.getint('scala-compile', 'partition_size_hint',
                                                            default=1000))

    # Set up dep checking if needed.
    if context.options.scala_check_missing_deps:
      JvmDependencyCache.init_product_requirements(self)

    self._opts = context.config.getlist('scala-compile', 'args')
    if context.options.scala_compile_warnings:
      self._opts.extend(context.config.getlist('scala-compile', 'warning_args'))
    else:
      self._opts.extend(context.config.getlist('scala-compile', 'no_warning_args'))

    # Various output directories.
    workdir = context.config.get('scala-compile', 'workdir')
    self._resources_dir = os.path.join(workdir, 'resources')
    self._artifact_factory = ZincArtifactFactory(workdir, self.context, self._zinc_utils)

    # The ivy confs for which we're building.
    self._confs = context.config.getlist('scala-compile', 'confs')

    # The artifact cache to read from/write to.
    artifact_cache_spec = context.config.getlist('scala-compile', 'artifact_caches', default=[])
    self.setup_artifact_cache(artifact_cache_spec)

    # If we are compiling scala libraries with circular deps on java libraries we need to make sure
    # those cycle deps are present.
    self._inject_java_cycles()
Code example #17
  def execute(self, targets):
    scala_targets = filter(lambda t: has_sources(t, '.scala'), targets)
    if not scala_targets:
      return

    # Get the exclusives group for the targets to compile.
    # Grouping guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(scala_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    # Add (only to the local copy) classpath entries necessary for our compiler plugins.
    for conf in self._confs:
      for jar in self._zinc_utils.plugin_jars():
        cp.insert(0, (conf, jar))

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(scala_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._get_deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if ZincUtils.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            if self._zinc_utils.run_zinc_split(self._analysis_file,
                                               ((invalid_sources + deleted_sources, newly_invalid_analysis_tmp),
                                                ([], valid_analysis_tmp))):
              raise TaskError('Failed to split off invalid analysis.')
            if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
              if self._zinc_utils.run_zinc_merge([self._invalid_analysis_file, newly_invalid_analysis_tmp],
                                                 invalid_analysis_tmp):
                raise TaskError('Failed to merge prior and current invalid analysis.')
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            ZincUtils._move_analysis(valid_analysis_tmp, self._analysis_file)
            ZincUtils._move_analysis(invalid_analysis_tmp, self._invalid_analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources, analysis_file).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            if self._zinc_utils.run_zinc_split(self._invalid_analysis_file, splits):
              raise TaskError('Failed to split invalid analysis into per-partition files.')

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          self._process_target_partition(partition, cp)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Kick off the background artifact cache write.
            if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
              self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

            # Merge the newly-valid analysis into our global valid analysis.
            if ZincUtils.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                new_valid_analysis = analysis_file + '.valid.new'
                if self._zinc_utils.run_zinc_merge([self._analysis_file, analysis_file], new_valid_analysis):
                  raise TaskError('Failed to merge new analysis back into valid analysis file.')
              ZincUtils._move_analysis(new_valid_analysis, self._analysis_file)
            else:  # We need to keep analysis_file around. Background tasks may need it.
              ZincUtils._copy_analysis(analysis_file, self._analysis_file)

          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              if self._zinc_utils.run_zinc_split(self._invalid_analysis_file,
                  [(sources, discarded_invalid_analysis), ([], new_invalid_analysis)]):
                raise TaskError('Failed to trim invalid analysis file.')
              ZincUtils._move_analysis(new_invalid_analysis, self._invalid_analysis_file)

          # Now that all the analysis accounting is complete, we can safely mark the
          # targets as valid.
          vts.update()

        # Check for missing dependencies, if needed.
        if invalidation_check.invalid_vts and os.path.exists(self._analysis_file):
          deps_cache = JvmDependencyCache(self.context, scala_targets, self._analysis_file, self._classes_dir)
          deps_cache.check_undeclared_dependencies()

    # Provide the target->class and source->class mappings to downstream tasks if needed.
    if self.context.products.isrequired('classes'):
      sources_by_target = self._compute_sources_by_target(scala_targets)
      classes_by_source = self._compute_classes_by_source()
      self._add_all_products_to_genmap(sources_by_target, classes_by_source)

    # Update the classpath for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
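
Note: example #17 maintains two global zinc analysis files, one covering sources whose analysis is still valid and one covering invalidated sources, and shuffles entries between them with run_zinc_split and run_zinc_merge. If you model an analysis as a plain set of source paths, the lifecycle reduces to set operations, as in this conceptual sketch (real analysis files are opaque and must be manipulated through the zinc tools):

# Start: all analysis is valid, nothing is invalid.
valid = {'A.scala', 'B.scala', 'C.scala'}
invalid = set()

# prepare-analysis: split newly invalid sources out of the valid analysis
# and merge them into the invalid one.
newly_invalid = {'B.scala'}
valid -= newly_invalid
invalid |= newly_invalid

# After a partition recompiles B.scala: merge its fresh analysis back into
# the valid file (update-upstream-analysis) and trim it from the invalid
# file (trim-downstream-analysis).
recompiled = {'B.scala'}
valid |= recompiled
invalid -= recompiled

print(sorted(valid), sorted(invalid))  # ['A.scala', 'B.scala', 'C.scala'] []
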
Code example #18
    def __init__(self, context):
        NailgunTask.__init__(self,
                             context,
                             workdir=context.config.get(
                                 'scala-compile', 'nailgun_dir'))

        # Set up the zinc utils.
        color = not context.options.no_color
        self._zinc_utils = ZincUtils(context=context,
                                     nailgun_task=self,
                                     color=color)

        # The rough number of source files to build in each compiler pass.
        self._partition_size_hint = (
            context.options.scala_compile_partition_size_hint
            if context.options.scala_compile_partition_size_hint != -1 else
            context.config.getint(
                'scala-compile', 'partition_size_hint', default=1000))

        # Set up dep checking if needed.
        if context.options.scala_check_missing_deps:
            JvmDependencyCache.init_product_requirements(self)

        self._opts = context.config.getlist('scala-compile', 'args')
        if context.options.scala_compile_warnings:
            self._opts.extend(
                context.config.getlist('scala-compile', 'warning_args'))
        else:
            self._opts.extend(
                context.config.getlist('scala-compile', 'no_warning_args'))

        # Various output directories.
        workdir = context.config.get('scala-compile', 'workdir')
        self._classes_dir = os.path.join(workdir, 'classes')
        self._analysis_dir = os.path.join(workdir, 'analysis')

        safe_mkdir(self._classes_dir)
        safe_mkdir(self._analysis_dir)

        self._analysis_file = os.path.join(self._analysis_dir,
                                           'global_analysis.valid')
        self._invalid_analysis_file = os.path.join(self._analysis_dir,
                                                   'global_analysis.invalid')
        self._resources_dir = os.path.join(workdir, 'resources')

        # The ivy confs for which we're building.
        self._confs = context.config.getlist('scala-compile', 'confs')

        self.context.products.require_data('exclusives_groups')

        self._local_artifact_cache_spec = \
          context.config.getlist('scala-compile', 'local_artifact_caches2', default=[])
        self._remote_artifact_cache_spec = \
          context.config.getlist('scala-compile', 'remote_artifact_caches2', default=[])

        # A temporary, but well-known, dir to munge analysis files in before caching. It must be
        # well-known so we know where to find the files when we retrieve them from the cache.
        self._analysis_tmpdir = os.path.join(self._analysis_dir,
                                             'artifact_cache_tmpdir')

        # If we are compiling scala libraries with circular deps on java libraries we need to make sure
        # those cycle deps are present.
        self._inject_java_cycles()

        # Sources present in the last analysis that have since been deleted.
        # Generated lazily, so do not access directly. Call self._get_deleted_sources().
        self._deleted_sources = None
Code example #19
File: scala_compile.py Project: lxwuchang/commons
  def __init__(self, context, workdir=None):
    NailgunTask.__init__(self, context, workdir=context.config.get('scala-compile', 'nailgun_dir'))

    self._partition_size_hint = \
      context.options.scala_compile_partition_size_hint \
      if context.options.scala_compile_partition_size_hint != -1 else \
      context.config.getint('scala-compile', 'partition_size_hint')

    self.check_missing_deps = context.options.scala_check_missing_deps
    self.check_intransitive_deps = context.options.scala_check_intransitive_deps
    self.check_unnecessary_deps = context.options.scala_check_unnecessary_deps
    if self.check_missing_deps:
      JvmDependencyCache.init_product_requirements(self)

    # We use the scala_compile_color flag if it is explicitly set on the command line.
    self._color = \
      context.options.scala_compile_color if context.options.scala_compile_color is not None else \
      context.config.getbool('scala-compile', 'color', default=True)

    self._compile_profile = context.config.get('scala-compile', 'compile-profile')  # The target scala version.
    self._zinc_profile = context.config.get('scala-compile', 'zinc-profile')
    plugins_profile = context.config.get('scala-compile', 'scalac-plugins-profile')

    self._zinc_classpath = nailgun_profile_classpath(self, self._zinc_profile)
    compiler_classpath = nailgun_profile_classpath(self, self._compile_profile)
    zinc_jars = ScalaCompile.identify_zinc_jars(compiler_classpath, self._zinc_classpath)
    self._zinc_jar_args = []
    for (name, jarpath) in zinc_jars.items():  # The zinc jar names are also the flag names.
      self._zinc_jar_args.extend(['-%s' % name, jarpath])

    self._plugin_jars = nailgun_profile_classpath(self, plugins_profile) if plugins_profile else []

    # All scala targets implicitly depend on the selected scala runtime.
    scaladeps = []
    for spec in context.config.getlist('scala-compile', 'scaladeps'):
      scaladeps.extend(context.resolve(spec))
    for target in context.targets(is_scala):
      target.update_dependencies(scaladeps)

    self._workdir = context.config.get('scala-compile', 'workdir') if workdir is None else workdir
    self._classes_dir = os.path.join(self._workdir, 'classes')
    self._analysis_cache_dir = os.path.join(self._workdir, 'analysis_cache')
    self._resources_dir = os.path.join(self._workdir, 'resources')

    self._main = context.config.get('scala-compile', 'main')

    self._args = context.config.getlist('scala-compile', 'args')
    self._jvm_args = context.config.getlist('scala-compile', 'jvm_args')
    if context.options.scala_compile_warnings:
      self._args.extend(context.config.getlist('scala-compile', 'warning_args'))
    else:
      self._args.extend(context.config.getlist('scala-compile', 'no_warning_args'))

    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = [p for val in context.options.plugins for p in val.split(',')] \
      if context.options.plugins is not None \
      else context.config.getlist('scala-compile', 'scalac-plugins', default=[])

    plugin_args = context.config.getdict('scala-compile', 'scalac-plugin-args', default={})

    active_plugins = ScalaCompile.find_plugins(plugin_names, self._plugin_jars)

    for name, jar in active_plugins.items():
      self._args.append('-Xplugin:%s' % jar)
      for arg in plugin_args.get(name, []):
        self._args.append('-P:%s:%s' % (name, arg))

    self._confs = context.config.getlist('scala-compile', 'confs')
    self._depfile_dir = os.path.join(self._workdir, 'depfiles')

    artifact_cache_spec = context.config.getlist('scala-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)
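
Note: the plugin wiring near the end of example #19 emits standard scalac flags: -Xplugin:<jar> loads a compiler plugin and -P:<plugin>:<arg> passes it an argument. A short sketch of that flag construction, with made-up plugin names and arguments:

active_plugins = {'myplugin': '/path/to/myplugin.jar'}  # name -> jar (hypothetical values)
plugin_args = {'myplugin': ['verbose']}

args = []
for name, jar in active_plugins.items():
  args.append('-Xplugin:%s' % jar)
  for arg in plugin_args.get(name, []):
    args.append('-P:%s:%s' % (name, arg))

print(args)  # ['-Xplugin:/path/to/myplugin.jar', '-P:myplugin:verbose']
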
Code example #20
    def execute(self, targets):
        scala_targets = filter(lambda t: has_sources(t, '.scala'), targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Grouping guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(scala_targets[0])

        # Add resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        cp = egroups.get_classpath_for_group(group_id)

        # Add (only to the local copy) classpath entries necessary for our compiler plugins.
        for conf in self._confs:
            for jar in self._zinc_utils.plugin_jars():
                cp.insert(0, (conf, jar))

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(scala_targets,
                              invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint
                              ) as invalidation_check:
            if invalidation_check.invalid_vts and not self.dry_run:
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]
                # The analysis for invalid and deleted sources is no longer valid.
                invalid_sources_by_target = self._compute_sources_by_target(
                    invalid_targets)
                invalid_sources = list(
                    itertools.chain.from_iterable(
                        invalid_sources_by_target.values()))
                deleted_sources = self._get_deleted_sources()

                # Work in a tmpdir so we don't stomp the main analysis files on error.
                # The tmpdir is cleaned up in a shutdown hook, because background work
                # may need to access files we create here even after this method returns.
                self._ensure_analysis_tmpdir()
                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)
                valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
                newly_invalid_analysis_tmp = os.path.join(
                    tmpdir, 'newly_invalid_analysis')
                invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
                if ZincUtils.is_nonempty_analysis(self._analysis_file):
                    with self.context.new_workunit(name='prepare-analysis'):
                        if self._zinc_utils.run_zinc_split(
                                self._analysis_file,
                            ((invalid_sources + deleted_sources,
                              newly_invalid_analysis_tmp),
                             ([], valid_analysis_tmp))):
                            raise TaskError(
                                'Failed to split off invalid analysis.')
                        if ZincUtils.is_nonempty_analysis(
                                self._invalid_analysis_file):
                            if self._zinc_utils.run_zinc_merge([
                                    self._invalid_analysis_file,
                                    newly_invalid_analysis_tmp
                            ], invalid_analysis_tmp):
                                raise TaskError(
                                    'Failed to merge prior and current invalid analysis.'
                                )
                        else:
                            invalid_analysis_tmp = newly_invalid_analysis_tmp

                        # Now it's OK to overwrite the main analysis files with the new state.
                        ZincUtils._move_analysis(valid_analysis_tmp,
                                                 self._analysis_file)
                        ZincUtils._move_analysis(invalid_analysis_tmp,
                                                 self._invalid_analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = []  # Each element is a triple (vts, sources, analysis_file).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if ZincUtils.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        if self._zinc_utils.run_zinc_split(
                                self._invalid_analysis_file, splits):
                            raise TaskError(
                                'Failed to split invalid analysis into per-partition files.'
                            )

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    self._process_target_partition(partition, cp)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

                    if os.path.exists(
                            analysis_file
                    ):  # The compilation created an analysis.
                        # Kick off the background artifact cache write.
                        if self.get_artifact_cache(
                        ) and self.context.options.write_to_artifact_cache:
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                        # Merge the newly-valid analysis into our global valid analysis.
                        if ZincUtils.is_nonempty_analysis(self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                new_valid_analysis = analysis_file + '.valid.new'
                                if self._zinc_utils.run_zinc_merge(
                                    [self._analysis_file, analysis_file],
                                        new_valid_analysis):
                                    raise TaskError(
                                        'Failed to merge new analysis back into valid analysis file.'
                                    )
                            ZincUtils._move_analysis(new_valid_analysis,
                                                     self._analysis_file)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            ZincUtils._copy_analysis(analysis_file,
                                                     self._analysis_file)

                    if ZincUtils.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            if self._zinc_utils.run_zinc_split(
                                    self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis),
                                 ([], new_invalid_analysis)]):
                                raise TaskError(
                                    'Failed to trim invalid analysis file.')
                            ZincUtils._move_analysis(
                                new_invalid_analysis,
                                self._invalid_analysis_file)

                    # Now that all the analysis accounting is complete, we can safely mark the
                    # targets as valid.
                    vts.update()

                # Check for missing dependencies, if needed.
                if invalidation_check.invalid_vts and os.path.exists(
                        self._analysis_file):
                    deps_cache = JvmDependencyCache(self.context,
                                                    scala_targets,
                                                    self._analysis_file,
                                                    self._classes_dir)
                    deps_cache.check_undeclared_dependencies()

        # Provide the target->class and source->class mappings to downstream tasks if needed.
        if self.context.products.isrequired('classes'):
            sources_by_target = self._compute_sources_by_target(scala_targets)
            classes_by_source = self._compute_classes_by_source()
            self._add_all_products_to_genmap(sources_by_target,
                                             classes_by_source)

        # Update the classpath for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._classes_dir)])