def post_process_cached_vts(cached_vts):
  # Note: presumably defined as a nested helper inside a method, so `self` is captured
  # from the enclosing scope rather than passed in as a parameter.
  # Merge the cached analyses into the existing global one.
  if cached_vts:
    with self.context.new_workunit(name='merge-dependencies'):
      global_deps = Dependencies(self._classes_dir)
      if os.path.exists(self._depfile):
        global_deps.load(self._depfile)
      for vt in cached_vts:
        for target in vt.targets:
          depfile = JavaCompile.create_depfile_path(self._depfile_tmpdir, [target])
          if os.path.exists(depfile):
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            global_deps.merge(deps)
      global_deps.save(self._depfile)
def merge_depfile(self, versioned_target_set):
  if len(versioned_target_set.targets) <= 1:
    return

  dst_depfile = self.create_depfile_path(versioned_target_set.targets)
  dst_deps = Dependencies(self._classes_dir)

  for target in versioned_target_set.targets:
    src_depfile = self.create_depfile_path([target])
    if os.path.exists(src_depfile):
      src_deps = Dependencies(self._classes_dir)
      src_deps.load(src_depfile)
      dst_deps.merge(src_deps)

  dst_deps.save(dst_depfile)
def _write_to_artifact_cache(self, vts, sources_by_target):
  self._ensure_depfile_tmpdir()
  vt_by_target = dict([(vt.target, vt) for vt in vts.versioned_targets])

  # This work can happen in the background, if there's a measurable benefit to that.

  # Split the depfile into per-target files.
  splits = [(sources, JavaCompile.create_depfile_path(self._depfile_tmpdir, [target]))
            for target, sources in sources_by_target.items()]
  deps = Dependencies(self._classes_dir)
  if os.path.exists(self._depfile):
    deps.load(self._depfile)
  deps.split(splits)

  # Gather up the artifacts.
  vts_artifactfiles_pairs = []
  for target, sources in sources_by_target.items():
    artifacts = [JavaCompile.create_depfile_path(self._depfile_tmpdir, [target])]
    for source in sources:
      for cls in deps.classes_by_source.get(source, []):
        artifacts.append(os.path.join(self._classes_dir, cls))
    vt = vt_by_target.get(target)
    if vt is not None:
      vts_artifactfiles_pairs.append((vt, artifacts))

  # Write to the artifact cache.
  self.update_artifact_cache(vts_artifactfiles_pairs)
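# The functions in this file lean on a `Dependencies` object for depfile bookkeeping.
# The real class lives elsewhere in the codebase; the stub below is only an inferred
# sketch of the interface implied by the calls here (load/save/add/merge/split/
# findclasses/classes_by_source), not the actual implementation. The class name is
# hypothetical; the code above uses `Dependencies`.
class DependenciesSketch(object):
  def __init__(self, classes_dir):
    self.classes_dir = classes_dir
    self.classes_by_source = {}  # source path -> list of class files relative to classes_dir

  def load(self, depfile):
    """Read source->classes mappings from depfile into classes_by_source."""

  def save(self, depfile):
    """Write classes_by_source out to depfile."""

  def add(self, src, classes):
    """Record that compiling src produced the given class files."""
    self.classes_by_source.setdefault(src, []).extend(classes)

  def merge(self, other):
    """Fold another instance's mappings into this one."""
    for src, classes in other.classes_by_source.items():
      self.add(src, classes)

  def split(self, splits):
    """For each (sources, depfile) pair, write a depfile covering just those sources."""

  def findclasses(self, targets):
    """Return {target: {source: [classes]}} for the sources owned by the given targets."""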
def __init__(self, context):
  NailgunTask.__init__(self, context,
                       workdir=context.config.get('java-compile', 'nailgun_dir'))

  self._flatten = (context.options.java_compile_flatten
                   if context.options.java_compile_flatten is not None
                   else context.config.getbool('java-compile', 'default_to_flatten'))

  workdir = context.config.get('java-compile', 'workdir')
  self._classes_dir = os.path.join(workdir, 'classes')
  self._resources_dir = os.path.join(workdir, 'resources')
  self._depfile_dir = os.path.join(workdir, 'depfiles')
  self._deps = Dependencies(self._classes_dir)

  self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
  self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

  self._args = context.config.getlist('java-compile', 'args')
  self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

  if context.options.java_compile_warnings:
    self._args.extend(context.config.getlist('java-compile', 'warning_args'))
  else:
    self._args.extend(context.config.getlist('java-compile', 'no_warning_args'))

  self._confs = context.config.getlist('java-compile', 'confs')
def __init__(self, context):
  NailgunTask.__init__(self, context,
                       workdir=context.config.get('java-compile', 'nailgun_dir'))

  self._partition_size_hint = (context.options.java_compile_partition_size_hint
                               if context.options.java_compile_partition_size_hint != -1
                               else context.config.getint('java-compile', 'partition_size_hint'))

  workdir = context.config.get('java-compile', 'workdir')
  self._classes_dir = os.path.join(workdir, 'classes')
  self._resources_dir = os.path.join(workdir, 'resources')
  self._depfile_dir = os.path.join(workdir, 'depfiles')
  self._deps = Dependencies(self._classes_dir)

  self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
  self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

  self._args = context.config.getlist('java-compile', 'args')
  self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

  if context.options.java_compile_warnings:
    self._args.extend(context.config.getlist('java-compile', 'warning_args'))
  else:
    self._args.extend(context.config.getlist('java-compile', 'no_warning_args'))

  self._confs = context.config.getlist('java-compile', 'confs')
def execute_single_compilation(self, vt, cp):
  depfile = self.create_depfile_path(vt.targets)

  self.merge_depfile(vt)  # Get what we can from previous builds.

  sources_by_target, fingerprint = self.calculate_sources(vt.targets)
  if sources_by_target:
    sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
    if not sources:
      self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                            '\n '.join(str(t) for t in sources_by_target.keys()))
    else:
      classpath = [jar for conf, jar in cp if conf in self._confs]
      result = self.compile(classpath, sources, fingerprint, depfile)
      if result != 0:
        default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
        raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

      self.split_depfile(vt)

      all_artifact_files = [depfile]

      if self._artifact_cache and self.context.options.write_to_artifact_cache:
        deps = Dependencies(self._classes_dir)
        deps.load(depfile)
        vts_artifactfile_pairs = []
        for single_vt in vt.versioned_targets:
          per_target_depfile = self.create_depfile_path([single_vt.target])
          per_target_artifact_files = [per_target_depfile]
          for _, classes_by_source in deps.findclasses([single_vt.target]).items():
            for _, classes in classes_by_source.items():
              classfile_paths = [os.path.join(self._classes_dir, cls) for cls in classes]
              per_target_artifact_files.extend(classfile_paths)
              all_artifact_files.extend(classfile_paths)
          vts_artifactfile_pairs.append((single_vt, per_target_artifact_files))
        vts_artifactfile_pairs.append((vt, all_artifact_files))
        self.update_artifact_cache(vts_artifactfile_pairs)
def execute(self, targets):
  java_targets = filter(_is_java, targets)
  if java_targets:
    safe_mkdir(self._classes_dir)
    safe_mkdir(self._depfile_dir)

    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(java_targets[0])
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])

    with self.invalidated(java_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      for vt in invalidation_check.invalid_vts_partitioned:
        # Compile, using partitions for efficiency.
        exclusives_classpath = egroups.get_classpath_for_group(group_id)
        self.execute_single_compilation(vt, exclusives_classpath)
        if not self.dry_run:
          vt.update()

      for vt in invalidation_check.all_vts:
        depfile = self.create_depfile_path(vt.targets)
        if not self.dry_run and os.path.exists(depfile):
          # Read in the deps created either just now or by a previous run on these targets.
          deps = Dependencies(self._classes_dir)
          deps.load(depfile)
          self._deps.merge(deps)

    if not self.dry_run:
      if self.context.products.isrequired('classes'):
        genmap = self.context.products.get('classes')

        # Map generated classes to the owning targets and sources.
        for target, classes_by_source in self._deps.findclasses(java_targets).items():
          for source, classes in classes_by_source.items():
            genmap.add(source, self._classes_dir, classes)
            genmap.add(target, self._classes_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way

        # 'Map' (rewrite) annotation processor service info files to the owning targets.
        for target in java_targets:
          if is_apt(target) and target.processors:
            basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
            processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
            self.write_processor_info(processor_info_file, target.processors)
            genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

      # Produce a monolithic apt processor service info file for further compilation rounds
      # and the unit test classpath.
      all_processors = set()
      for target in java_targets:
        if is_apt(target) and target.processors:
          all_processors.update(target.processors)
      processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
      if os.path.exists(processor_info_file):
        with safe_open(processor_info_file, 'r') as f:
          for processor in f:
            all_processors.add(processor.strip())
      self.write_processor_info(processor_info_file, all_processors)
def split_depfile(self, vt):
  depfile = self.create_depfile_path(vt.targets)
  if len(vt.targets) <= 1 or not os.path.exists(depfile) or self.dry_run:
    return

  deps = Dependencies(self._classes_dir)
  deps.load(depfile)

  classes_by_source_by_target = deps.findclasses(vt.targets)
  for target in vt.targets:
    classes_by_source = classes_by_source_by_target.get(target, {})
    dst_depfile = self.create_depfile_path([target])
    dst_deps = Dependencies(self._classes_dir)

    for source, classes in classes_by_source.items():
      src = os.path.join(target.target_base, source)
      dst_deps.add(src, classes)
    dst_deps.save(dst_depfile)
def post_process(self, versioned_targets):
  depfile = self.create_depfile_path(versioned_targets.targets)
  if not self.dry_run and os.path.exists(depfile):
    # Read in the deps created either just now or by a previous compiler run on these targets.
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    self.split_depfile(deps, versioned_targets)
    self._deps.merge(deps)
def _compute_classes_by_source(self, depfile=None):
  """Compute src->classes."""
  if depfile is None:
    depfile = self._depfile

  if not os.path.exists(depfile):
    return {}
  deps = Dependencies(self._classes_dir)
  deps.load(depfile)
  return deps.classes_by_source
def execute(self, targets):
  if not self._flatten and len(targets) > 1:
    topologically_sorted_targets = filter(is_scala, reversed(InternalTarget.sort_targets(targets)))
    for target in topologically_sorted_targets:
      self.execute([target])
    return

  self.context.log.info('Compiling targets %s' % str(targets))

  scala_targets = filter(is_scala, targets)
  if scala_targets:
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, self._resources_dir))
        cp.insert(0, (conf, self._classes_dir))

    with self.changed(scala_targets, invalidate_dependants=True) as changed_targets:
      sources_by_target = self.calculate_sources(changed_targets)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          self.context.log.warn('Skipping scala compile for targets with no sources:\n %s' %
                                '\n '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources)
          if result != 0:
            raise TaskError('%s returned %d' % (self._main, result))

    if self.context.products.isrequired('classes'):
      genmap = self.context.products.get('classes')

      # Map generated classes to the owning targets and sources.
      dependencies = Dependencies(self._classes_dir, self._depfile)
      for target, classes_by_source in dependencies.findclasses(targets).items():
        for source, classes in classes_by_source.items():
          genmap.add(source, self._classes_dir, classes)
          genmap.add(target, self._classes_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way

      # Create and Map scala plugin info files to the owning targets.
      for target in targets:
        if is_scalac_plugin(target) and target.classname:
          basedir = self.write_plugin_info(target)
          genmap.add(target, basedir, [_PLUGIN_INFO_FILE])
def execute(self, targets):
  java_targets = filter(lambda t: has_sources(t, '.java'), targets)
  if not java_targets:
    return

  # Get the exclusives group for the targets to compile.
  # Group guarantees that there'll be a single exclusives key for them.
  egroups = self.context.products.get_data('exclusives_groups')
  group_id = egroups.get_group_key_for_target(java_targets[0])

  # Add classes and resource dirs to the classpath for us and for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
    egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

  # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
  cp = egroups.get_classpath_for_group(group_id)

  with self.invalidated(java_targets, invalidate_dependents=True,
                        partition_size_hint=self._partition_size_hint) as invalidation_check:
    if not self.dry_run:
      for vts in invalidation_check.invalid_vts_partitioned:
        # Compile, using partitions for efficiency.
        sources_by_target = self._process_target_partition(vts, cp)

        # TODO: Check for missing dependencies. See ScalaCompile for an example.
        # Will require figuring out what the actual deps of a class file are.

        vts.update()
        if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
          self._write_to_artifact_cache(vts, sources_by_target)

      # Provide the target->class and source->class mappings to downstream tasks if needed.
      if self.context.products.isrequired('classes'):
        if os.path.exists(self._depfile):
          sources_by_target = self._compute_sources_by_target(java_targets)
          deps = Dependencies(self._classes_dir)
          deps.load(self._depfile)
          self._add_all_products_to_genmap(sources_by_target, deps.classes_by_source)

      # Produce a monolithic apt processor service info file for further compilation rounds
      # and the unit test classpath.
      all_processors = set()
      for target in java_targets:
        if is_apt(target) and target.processors:
          all_processors.update(target.processors)
      processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
      if os.path.exists(processor_info_file):
        with safe_open(processor_info_file, 'r') as f:
          for processor in f:
            all_processors.add(processor.strip())
      self.write_processor_info(processor_info_file, all_processors)
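# The apt handling above reads and writes a processor "service info" file: one fully
# qualified annotation processor class name per line, the format Java's ServiceLoader
# expects for javax.annotation.processing.Processor. `write_processor_info` is defined
# elsewhere in the task; the sketch below is only a plausible implementation consistent
# with how the file is read back above (one name per line, whitespace stripped), not the
# real one.
def write_processor_info_sketch(processor_info_file, processors):
  safe_mkdir(os.path.dirname(processor_info_file))
  with safe_open(processor_info_file, 'w') as f:
    for processor in sorted(processors):
      f.write('%s\n' % processor.strip())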
def split_depfile(self, deps, versioned_target_set):
  if len(versioned_target_set.targets) <= 1:
    return

  classes_by_source_by_target = deps.findclasses(versioned_target_set.targets)
  for target in versioned_target_set.targets:
    classes_by_source = classes_by_source_by_target.get(target, {})
    dst_depfile = self.create_depfile_path([target])
    dst_deps = Dependencies(self._classes_dir)

    for source, classes in classes_by_source.items():
      src = os.path.join(target.target_base, source)
      dst_deps.add(src, classes)
    dst_deps.save(dst_depfile)
def __init__(self, context):
  NailgunTask.__init__(self, context,
                       workdir=context.config.get('java-compile', 'nailgun_dir'))

  if context.options.java_compile_partition_size_hint != -1:
    self._partition_size_hint = context.options.java_compile_partition_size_hint
  else:
    self._partition_size_hint = context.config.getint('java-compile', 'partition_size_hint',
                                                      default=1000)

  workdir = context.config.get('java-compile', 'workdir')
  self._classes_dir = os.path.join(workdir, 'classes')
  self._resources_dir = os.path.join(workdir, 'resources')
  self._depfile_dir = os.path.join(workdir, 'depfiles')
  self._deps = Dependencies(self._classes_dir)

  self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
  self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

  self._opts = context.config.getlist('java-compile', 'args')
  self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

  self._javac_opts = []
  if context.options.java_compile_args:
    for arg in context.options.java_compile_args:
      self._javac_opts.extend(shlex.split(arg))
  else:
    self._javac_opts.extend(context.config.getlist('java-compile', 'javac_args', default=[]))

  if context.options.java_compile_warnings:
    self._opts.extend(context.config.getlist('java-compile', 'warning_args'))
  else:
    self._opts.extend(context.config.getlist('java-compile', 'no_warning_args'))

  self._confs = context.config.getlist('java-compile', 'confs')

  # The artifact cache to read from/write to.
  artifact_cache_spec = context.config.getlist('java-compile', 'artifact_caches', default=[])
  self.setup_artifact_cache(artifact_cache_spec)
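# For reference, the settings read in __init__ above all live under a single
# `[java-compile]` section of the pants config file. The keys it expects (an inventory
# taken from the get/getlist/getint calls above; the actual values come from the repo's
# config and are not shown here):
#
#   nailgun_dir, workdir, partition_size_hint, jmake-profile, compiler-profile,
#   args, jvm_args, javac_args, warning_args, no_warning_args, confs, artifact_caches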
def post_process(self, vt, upstream_analysis_caches, split_artifact):
  output_dir, depfile, analysis_cache = self.create_output_paths(vt.targets)
  if not self.dry_run:
    # Read in the deps created either just now or by a previous compiler run on these targets.
    if os.path.exists(depfile):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(output_dir)
      deps.load(depfile)

      if split_artifact:
        self.split_artifact(deps, vt)

      if self.context.products.isrequired('classes'):
        genmap = self.context.products.get('classes')
        for target, classes_by_source in deps.findclasses(vt.targets).items():
          for source, classes in classes_by_source.items():
            genmap.add(source, output_dir, classes)
            genmap.add(target, output_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way

        # Create and Map scala plugin info files to the owning targets.
        for target in vt.targets:
          if is_scalac_plugin(target) and target.classname:
            basedir = self.write_plugin_info(target)
            genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

    # Update the upstream analysis map.
    if os.path.exists(analysis_cache):
      analysis_cache_parts = os.path.split(analysis_cache)
      if not upstream_analysis_caches.has(output_dir):
        # A previous chunk might have already updated this. It is certainly possible for a later chunk to
        # independently depend on some target that a previous chunk already built.
        upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [analysis_cache_parts[1]])

    # Update the classpath.
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, output_dir))
def execute_single_compilation(self, versioned_targets, cp):
  compilation_id = Target.maybe_readable_identify(versioned_targets.targets)

  # TODO: Use the artifact cache. In flat mode we may want to look for the artifact for all targets,
  # not just the invalid ones, as it might be more likely to be present. Or we could look for both.

  if self._flatten:
    # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
    # allows us to build different targets in different invocations without losing dependency information
    # from any of them.
    depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
  else:
    # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
    # compilation will read in the entire depfile, add its stuff to it and write it out again).
    depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

  if not versioned_targets.valid:
    self.context.log.info('Compiling targets %s' % str(versioned_targets.targets))
    sources_by_target, processors, fingerprint = self.calculate_sources(versioned_targets.targets)
    if sources_by_target:
      sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
      if not sources:
        touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
        self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                              '\n '.join(str(t) for t in sources_by_target.keys()))
      else:
        classpath = [jar for conf, jar in cp if conf in self._confs]
        result = self.compile(classpath, sources, fingerprint, depfile)
        if result != 0:
          default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
          raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

      if processors:
        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              processors.add(processor.strip())
        self.write_processor_info(processor_info_file, processors)

  # Read in the deps created either just now or by a previous compiler run on these targets.
  deps = Dependencies(self._classes_dir)
  deps.load(depfile)
  self._deps.merge(deps)
def execute(self, targets):
  if not self._flatten and len(targets) > 1:
    topologically_sorted_targets = filter(JavaCompile._is_java,
                                          reversed(InternalTarget.sort_targets(targets)))
    for target in topologically_sorted_targets:
      self.execute([target])
    return

  self.context.log.info('Compiling targets %s' % str(targets))

  java_targets = filter(JavaCompile._is_java, targets)
  if java_targets:
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, self._resources_dir))
        cp.insert(0, (conf, self._classes_dir))

    with self.changed(java_targets, invalidate_dependants=True) as changed:
      sources_by_target, processors, fingerprint = self.calculate_sources(changed)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                                '\n '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, fingerprint)
          if result != 0:
            default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
            raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

        if processors:
          # Produce a monolithic apt processor service info file for further compilation rounds
          # and the unit test classpath.
          processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
          if os.path.exists(processor_info_file):
            with safe_open(processor_info_file, 'r') as f:
              for processor in f:
                processors.add(processor.strip())
          self.write_processor_info(processor_info_file, processors)

    if self.context.products.isrequired('classes'):
      genmap = self.context.products.get('classes')

      # Map generated classes to the owning targets and sources.
      dependencies = Dependencies(self._classes_dir, self._dependencies_file)
      for target, classes_by_source in dependencies.findclasses(targets).items():
        for source, classes in classes_by_source.items():
          genmap.add(source, self._classes_dir, classes)
          genmap.add(target, self._classes_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way

      # 'Map' (rewrite) annotation processor service info files to the owning targets.
      for target in targets:
        if is_apt(target) and target.processors:
          basedir = os.path.join(self._resources_dir, target.id)
          processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
          self.write_processor_info(processor_info_file, target.processors)
          genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
def merge_artifact(self, versioned_target_set):
  if len(versioned_target_set.targets) <= 1:
    return

  with temporary_dir() as tmpdir:
    dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
    safe_rmtree(dst_output_dir)
    safe_mkdir(dst_output_dir)

    src_analysis_caches = []

    analysis_args = []
    analysis_args.extend(self._zinc_jar_args)
    analysis_args.extend([
      '-log-level', self.context.options.log_level or 'info',
      '-analysis',
    ])

    # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
    dst_deps = Dependencies(dst_output_dir)

    for target in versioned_target_set.targets:
      src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
      if os.path.exists(src_depfile):
        src_deps = Dependencies(src_output_dir)
        src_deps.load(src_depfile)
        dst_deps.merge(src_deps)

        classes_by_source = src_deps.findclasses([target]).get(target, {})
        for source, classes in classes_by_source.items():
          for cls in classes:
            src = os.path.join(src_output_dir, cls)
            dst = os.path.join(dst_output_dir, cls)
            # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
            # it's missing and rebuild it.
            # dst may already exist if we have overlapping targets. It's not a good idea
            # to have those, but until we enforce it, we must allow it here.
            if os.path.exists(src) and not os.path.exists(dst):
              # Copy the class file.
              safe_mkdir(os.path.dirname(dst))
              os.link(src, dst)

      # Use zinc to rebase a copy of the per-target analysis files prior to merging.
      if os.path.exists(src_analysis_cache):
        src_analysis_cache_tmp = \
          os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
        shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
        src_analysis_caches.append(src_analysis_cache_tmp)
        rebase_args = analysis_args + [
          '-cache', src_analysis_cache_tmp,
          '-rebase', '%s:%s' % (src_output_dir, dst_output_dir),
        ]
        if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
          self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. '
                                'Target may require a full rebuild.' % src_analysis_cache_tmp)

    dst_deps.save(dst_depfile)

    # Use zinc to merge the analysis files.
    merge_args = analysis_args + [
      '-cache', dst_analysis_cache,
      '-merge', ':'.join(src_analysis_caches),
    ]
    if self.runjava(self._main, classpath=self._zinc_classpath, args=merge_args, jvmargs=self._jvm_args):
      raise TaskError('zinc failed to merge analysis files %s to %s' %
                      (':'.join(src_analysis_caches), dst_analysis_cache))
def split_artifact(self, deps, versioned_target_set):
  if len(versioned_target_set.targets) <= 1:
    return

  buildroot = get_buildroot()
  classes_by_source_by_target = deps.findclasses(versioned_target_set.targets)
  src_output_dir, _, src_analysis_cache = self.create_output_paths(versioned_target_set.targets)

  # List of triples of (list of sources, destination output dir, destination analysis cache).
  analysis_splits = []

  for target in versioned_target_set.targets:
    classes_by_source = classes_by_source_by_target.get(target, {})
    dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths([target])
    safe_rmtree(dst_output_dir)
    safe_mkdir(dst_output_dir)

    sources = []
    dst_deps = Dependencies(dst_output_dir)

    for source, classes in classes_by_source.items():
      src = os.path.join(target.target_base, source)
      dst_deps.add(src, classes)
      source_abspath = os.path.join(buildroot, target.target_base, source)
      sources.append(source_abspath)
      for cls in classes:
        # Copy the class file.
        dst = os.path.join(dst_output_dir, cls)
        safe_mkdir(os.path.dirname(dst))
        os.link(os.path.join(src_output_dir, cls), dst)

    dst_deps.save(dst_depfile)
    analysis_splits.append((sources, dst_output_dir, dst_analysis_cache))

  # Use zinc to split the analysis files.
  if os.path.exists(src_analysis_cache):
    analysis_args = []
    analysis_args.extend(self._zinc_jar_args)
    analysis_args.extend([
      '-log-level', self.context.options.log_level or 'info',
      '-analysis',
    ])
    split_args = analysis_args + [
      '-cache', src_analysis_cache,
      '-split', ','.join(['{%s}:%s' % (':'.join(x[0]), x[2]) for x in analysis_splits]),
    ]
    if self.runjava(self._main, classpath=self._zinc_classpath, args=split_args, jvmargs=self._jvm_args):
      raise TaskError('zinc failed to split analysis files %s from %s' %
                      (':'.join([x[2] for x in analysis_splits]), src_analysis_cache))

    # Now rebase the newly created analysis files.
    for split in analysis_splits:
      dst_analysis_cache = split[2]
      if os.path.exists(dst_analysis_cache):
        rebase_args = analysis_args + [
          '-cache', dst_analysis_cache,
          '-rebase', '%s:%s' % (src_output_dir, split[1]),
        ]
        if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
          raise TaskError('In split_artifact: zinc failed to rebase analysis file %s' % dst_analysis_cache)
def execute_single_compilation(self, versioned_target_set, cp, upstream_analysis_caches):
  """Execute a single compilation, updating upstream_analysis_caches if needed."""
  if self._flatten:
    compilation_id = 'flat'
    output_dir = self._flat_classes_dir
  else:
    compilation_id = Target.maybe_readable_identify(versioned_target_set.targets)
    # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
    # analysis caches of previous compilations. We then copy the results out to the real output dir.
    output_dir = os.path.join(self._incremental_classes_dir, compilation_id)

  depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
  analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.analysis_cache'

  safe_mkdir(output_dir)

  if not versioned_target_set.valid:
    with self.check_artifact_cache(versioned_target_set,
                                   build_artifacts=[output_dir, depfile, analysis_cache]) as in_cache:
      if not in_cache:
        self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
        sources_by_target = self.calculate_sources(versioned_target_set.targets)
        if sources_by_target:
          sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
          if not sources:
            # Create empty files, since downstream code may assume that these exist.
            touch(depfile)
            touch(analysis_cache)
            self.context.log.warn('Skipping scala compile for targets with no sources:\n %s' %
                                  '\n '.join(str(t) for t in sources_by_target.keys()))
          else:
            classpath = [jar for conf, jar in cp if conf in self._confs]
            result = self.compile(classpath, sources, output_dir, analysis_cache,
                                  upstream_analysis_caches, depfile)
            if result != 0:
              raise TaskError('%s returned %d' % (self._main, result))

  # Note that the following post-processing steps must happen even for valid targets.

  # Read in the deps created either just now or by a previous compiler run on these targets.
  if self.context.products.isrequired('classes'):
    self.context.log.debug('Reading dependencies from ' + depfile)
    deps = Dependencies(output_dir)
    deps.load(depfile)

    genmap = self.context.products.get('classes')
    for target, classes_by_source in deps.findclasses(versioned_target_set.targets).items():
      for source, classes in classes_by_source.items():
        genmap.add(source, output_dir, classes)
        genmap.add(target, output_dir, classes)

    # TODO(John Sirois): Map target.resources in the same way

    # Create and Map scala plugin info files to the owning targets.
    for target in versioned_target_set.targets:
      if is_scalac_plugin(target) and target.classname:
        basedir = self.write_plugin_info(target)
        genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

  # Update the upstream analysis map.
  analysis_cache_parts = os.path.split(analysis_cache)
  if not upstream_analysis_caches.has(output_dir):
    # A previous chunk might have already updated this. It is certainly possible for a later chunk to
    # independently depend on some target that a previous chunk already built.
    upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [analysis_cache_parts[1]])

  # Update the classpath.
  with self.context.state('classpath', []) as cp:
    for conf in self._confs:
      cp.insert(0, (conf, output_dir))
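# `upstream_analysis_caches` above is supplied by the pants products machinery; the only
# interface these functions rely on is `has(basedir)` and `add(basedir, parent_dir,
# [file_names])`, much like the genmap used for 'classes'. A minimal sketch of that
# inferred interface (hypothetical class name; not the real products implementation):
class UpstreamAnalysisCachesSketch(object):
  def __init__(self):
    self._by_basedir = {}  # output_dir -> list of (parent_dir, [analysis cache file names])

  def has(self, basedir):
    return basedir in self._by_basedir

  def add(self, basedir, parent_dir, file_names):
    self._by_basedir.setdefault(basedir, []).append((parent_dir, list(file_names)))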