def execute(self, targets):
  # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so
  # we can inject a dep on the scala runtime library and still have it ivy-resolve.
  scala_targets = [t for t in targets if t.has_sources('.scala')]
  if not scala_targets:
    return

  # Get the exclusives group for the targets to compile.
  # The group guarantees that there'll be a single exclusives key for them.
  egroups = self.context.products.get_data('exclusives_groups')
  group_id = egroups.get_group_key_for_target(scala_targets[0])

  # Add resource dirs to the classpath for us and for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

  # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
  cp = egroups.get_classpath_for_group(group_id)

  # Add (only to the local copy) classpath entries necessary for our compiler plugins.
  for conf in self._confs:
    for jar in self._zinc_utils.plugin_jars():
      cp.insert(0, (conf, jar))

  # Invalidation check. Everything inside the with block must succeed for the
  # invalid targets to become valid.
  with self.invalidated(scala_targets, invalidate_dependents=True,
                        partition_size_hint=self._partition_size_hint) as invalidation_check:
    if invalidation_check.invalid_vts and not self.dry_run:
      invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]

      # The analysis for invalid and deleted sources is no longer valid.
      invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
      invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
      deleted_sources = self._get_deleted_sources()

      # Work in a tmpdir so we don't stomp the main analysis files on error.
      # The tmpdir is cleaned up in a shutdown hook, because background work
      # may need to access files we create here even after this method returns.
      self._ensure_analysis_tmpdir()
      tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
      os.mkdir(tmpdir)
      valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
      newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
      invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
      if ZincUtils.is_nonempty_analysis(self._analysis_file):
        with self.context.new_workunit(name='prepare-analysis'):
          Analysis.split_to_paths(self._analysis_file,
              [(invalid_sources + deleted_sources, newly_invalid_analysis_tmp)],
              valid_analysis_tmp)
          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            Analysis.merge_from_paths([self._invalid_analysis_file, newly_invalid_analysis_tmp],
                                      invalid_analysis_tmp)
          else:
            invalid_analysis_tmp = newly_invalid_analysis_tmp

          # Now it's OK to overwrite the main analysis files with the new state.
          shutil.move(valid_analysis_tmp, self._analysis_file)
          shutil.move(invalid_analysis_tmp, self._invalid_analysis_file)

      # Figure out the sources and analysis belonging to each partition.
      partitions = []  # Each element is a triple (vts, sources, analysis_file).
      for vts in invalidation_check.invalid_vts_partitioned:
        partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
        os.mkdir(partition_tmpdir)
        sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
        analysis_file = os.path.join(partition_tmpdir, 'analysis')
        partitions.append((vts, sources, analysis_file))

      # Split per-partition files out of the global invalid analysis.
      if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
        with self.context.new_workunit(name='partition-analysis'):
          splits = [(x[1], x[2]) for x in partitions]
          Analysis.split_to_paths(self._invalid_analysis_file, splits)

      # Now compile partitions one by one.
      for partition in partitions:
        (vts, sources, analysis_file) = partition
        self._process_target_partition(partition, cp)
        # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
        if os.path.exists(analysis_file):  # The compilation created an analysis.
          # Merge the newly-valid analysis with our global valid analysis.
          new_valid_analysis = analysis_file + '.valid.new'
          if ZincUtils.is_nonempty_analysis(self._analysis_file):
            with self.context.new_workunit(name='update-upstream-analysis'):
              Analysis.merge_from_paths([self._analysis_file, analysis_file], new_valid_analysis)
          else:  # We need to keep analysis_file around. Background tasks may need it.
            shutil.copy(analysis_file, new_valid_analysis)

          # Move the merged valid analysis to its proper location.
          # We do this before checking for missing dependencies, so that we can still
          # enjoy an incremental compile after fixing missing deps.
          shutil.move(new_valid_analysis, self._analysis_file)

          # Check for missing dependencies.
          actual_deps = Analysis.parse_deps_from_path(self._analysis_file)
          # TODO(benjy): Temporary hack until we inject a dep on the scala runtime jar.
          actual_deps_filtered = {}
          scalalib_re = re.compile(r'scala-library-\d+\.\d+\.\d+\.jar$')
          for src, deps in actual_deps.iteritems():
            actual_deps_filtered[src] = filter(lambda x: scalalib_re.search(x) is None, deps)
          self.check_for_missing_dependencies(sources, actual_deps_filtered)

          # Kick off the background artifact cache write.
          if self.artifact_cache_writes_enabled():
            self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              Analysis.split_to_paths(self._invalid_analysis_file,
                  [(sources, discarded_invalid_analysis)], new_invalid_analysis)
              shutil.move(new_invalid_analysis, self._invalid_analysis_file)

        # Now that all the analysis accounting is complete, and we have no missing deps,
        # we can safely mark the targets as valid.
        vts.update()

  # Provide the target->class and source->class mappings to downstream tasks if needed.
  if self.context.products.isrequired('classes'):
    sources_by_target = self._compute_sources_by_target(scala_targets)
    classes_by_source = self._compute_classes_by_source()
    self._add_all_products_to_genmap(sources_by_target, classes_by_source)

  # Update the classpath for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
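The method above keeps two global analysis files in sync: one covering currently-valid sources and one covering invalidated sources. The sketch below illustrates only that bookkeeping invariant, using a plain dict keyed by source path as a stand-in for a real Zinc analysis; split_analysis, merge_analyses, and the sample data are hypothetical stand-ins, not the Analysis/ZincUtils API used by the task.

# Hypothetical stand-in: an "analysis" is just {source_path: analysis_data}.
def split_analysis(analysis, sources):
  """Split `analysis` into (entries for `sources`, the remainder)."""
  wanted = set(sources)
  extracted = {s: d for s, d in analysis.items() if s in wanted}
  remainder = {s: d for s, d in analysis.items() if s not in wanted}
  return extracted, remainder

def merge_analyses(analyses):
  """Merge several analyses; later entries win on conflict."""
  merged = {}
  for a in analyses:
    merged.update(a)
  return merged

# Mirror of the task's bookkeeping: move invalid sources out of the valid
# analysis, compile them, then fold the fresh per-partition analysis back.
valid = {'A.scala': 'old', 'B.scala': 'old'}
invalid = {}

newly_invalid, valid = split_analysis(valid, ['B.scala'])   # 'prepare-analysis'
invalid = merge_analyses([invalid, newly_invalid])

fresh = {'B.scala': 'new'}                                  # result of compiling a partition
valid = merge_analyses([valid, fresh])                      # 'update-upstream-analysis'
_, invalid = split_analysis(invalid, ['B.scala'])           # 'trim-downstream-analysis'

assert valid == {'A.scala': 'old', 'B.scala': 'new'} and invalid == {}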
def execute(self, targets):
  # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so, e.g.,
  # we can inject a dep on the scala runtime library and still have it ivy-resolve.
  relevant_targets = [t for t in targets if t.has_sources(self._file_suffix)]
  if not relevant_targets:
    return

  # Get the exclusives group for the targets to compile.
  # The group guarantees that there'll be a single exclusives key for them.
  egroups = self.context.products.get_data('exclusives_groups')
  group_id = egroups.get_group_key_for_target(relevant_targets[0])

  # Add resource dirs to the classpath for us and for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

  # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
  classpath = egroups.get_classpath_for_group(group_id)

  # Add any extra classpath elements.
  for conf in self._confs:
    for jar in self.extra_classpath_elements():
      classpath.insert(0, (conf, jar))

  # Target -> sources (relative to buildroot).
  sources_by_target = self._compute_sources_by_target(relevant_targets)

  # Invalidation check. Everything inside the with block must succeed for the
  # invalid targets to become valid.
  with self.invalidated(relevant_targets, invalidate_dependents=True,
                        partition_size_hint=self._partition_size_hint) as invalidation_check:
    if invalidation_check.invalid_vts and not self.dry_run:
      # The analysis for invalid and deleted sources is no longer valid.
      invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
      invalid_sources_by_target = {}
      for tgt in invalid_targets:
        invalid_sources_by_target[tgt] = sources_by_target[tgt]
      invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
      deleted_sources = self._deleted_sources()

      # Work in a tmpdir so we don't stomp the main analysis files on error.
      # The tmpdir is cleaned up in a shutdown hook, because background work
      # may need to access files we create here even after this method returns.
      self._ensure_analysis_tmpdir()
      tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
      os.mkdir(tmpdir)
      valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
      newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
      invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
      if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
        with self.context.new_workunit(name='prepare-analysis'):
          self._analysis_tools.split_to_paths(self._analysis_file,
              [(invalid_sources + deleted_sources, newly_invalid_analysis_tmp)],
              valid_analysis_tmp)
          if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
            self._analysis_tools.merge_from_paths(
                [self._invalid_analysis_file, newly_invalid_analysis_tmp], invalid_analysis_tmp)
          else:
            invalid_analysis_tmp = newly_invalid_analysis_tmp

          # Now it's OK to overwrite the main analysis files with the new state.
          shutil.move(valid_analysis_tmp, self._analysis_file)
          shutil.move(invalid_analysis_tmp, self._invalid_analysis_file)

      # Register products for all the valid targets.
      # We register as we go, so dependency checking code can use this data.
      valid_targets = list(set(relevant_targets) - set(invalid_targets))
      self._register_products(valid_targets, sources_by_target, self._analysis_file)

      # Figure out the sources and analysis belonging to each partition.
      partitions = []  # Each element is a triple (vts, sources, analysis_file).
      for vts in invalidation_check.invalid_vts_partitioned:
        partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
        os.mkdir(partition_tmpdir)
        sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
        analysis_file = os.path.join(partition_tmpdir, 'analysis')
        partitions.append((vts, sources, analysis_file))

      # Split per-partition files out of the global invalid analysis.
      if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
        with self.context.new_workunit(name='partition-analysis'):
          splits = [(x[1], x[2]) for x in partitions]
          self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

      # Now compile partitions one by one.
      for partition in partitions:
        (vts, sources, analysis_file) = partition
        cp_entries = [entry for conf, entry in classpath if conf in self._confs]
        self._process_target_partition(partition, cp_entries)
        # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
        if os.path.exists(analysis_file):  # The compilation created an analysis.
          # Merge the newly-valid analysis with our global valid analysis.
          new_valid_analysis = analysis_file + '.valid.new'
          if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
            with self.context.new_workunit(name='update-upstream-analysis'):
              self._analysis_tools.merge_from_paths([self._analysis_file, analysis_file],
                                                    new_valid_analysis)
          else:  # We need to keep analysis_file around. Background tasks may need it.
            shutil.copy(analysis_file, new_valid_analysis)

          # Move the merged valid analysis to its proper location.
          # We do this before checking for missing dependencies, so that we can still
          # enjoy an incremental compile after fixing missing deps.
          shutil.move(new_valid_analysis, self._analysis_file)

          # Update the products with the latest classes. Must happen before the
          # missing dependencies check.
          self._register_products(vts.targets, sources_by_target, analysis_file)

          if self._dep_analyzer:
            # Check for missing dependencies.
            actual_deps = self._analysis_parser.parse_deps_from_path(analysis_file,
                lambda: self._compute_classpath_elements_by_class(cp_entries))
            with self.context.new_workunit(name='find-missing-dependencies'):
              self._dep_analyzer.check(sources, actual_deps)

          # Kick off the background artifact cache write.
          if self.artifact_cache_writes_enabled():
            self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

          if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              self._analysis_tools.split_to_paths(self._invalid_analysis_file,
                  [(sources, discarded_invalid_analysis)], new_invalid_analysis)
              shutil.move(new_invalid_analysis, self._invalid_analysis_file)

        # Now that all the analysis accounting is complete, and we have no missing deps,
        # we can safely mark the targets as valid.
        vts.update()
    else:
      # Nothing to build. Register products for all the targets in one go.
      self._register_products(relevant_targets, sources_by_target, self._analysis_file)

  # Update the classpath for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])

  self.post_process(relevant_targets)
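In this version the group classpath is a list of (conf, path) pairs, and only entries whose conf is active are flattened into the cp_entries handed to the partition compile. Below is a minimal, self-contained illustration of that prepend-and-filter step; the paths and the confs list are made up for the example, not the real exclusives-group data.

# The group classpath is a list of (conf, path) pairs; order matters, so new
# entries are prepended, just as resource dirs and extra jars are above.
confs = ['default']
classpath = [('default', '/path/to/upstream/classes'), ('test', '/path/to/test/classes')]

# Prepend an extra element for every active conf (mirrors classpath.insert(0, (conf, jar))).
for conf in confs:
  classpath.insert(0, (conf, '/path/to/extra.jar'))

# Flatten to just the entries relevant to the confs we compile against,
# as done when building cp_entries for _process_target_partition.
cp_entries = [entry for conf, entry in classpath if conf in confs]
assert cp_entries == ['/path/to/extra.jar', '/path/to/upstream/classes']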
def execute(self, targets):
  scala_targets = filter(lambda t: has_sources(t, '.scala'), targets)
  if not scala_targets:
    return

  # Get the exclusives group for the targets to compile.
  # The group guarantees that there'll be a single exclusives key for them.
  egroups = self.context.products.get_data('exclusives_groups')
  group_id = egroups.get_group_key_for_target(scala_targets[0])

  # Add resource dirs to the classpath for us and for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

  # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
  cp = egroups.get_classpath_for_group(group_id)

  # Add (only to the local copy) classpath entries necessary for our compiler plugins.
  for conf in self._confs:
    for jar in self._zinc_utils.plugin_jars():
      cp.insert(0, (conf, jar))

  # Invalidation check. Everything inside the with block must succeed for the
  # invalid targets to become valid.
  with self.invalidated(scala_targets, invalidate_dependents=True,
                        partition_size_hint=self._partition_size_hint) as invalidation_check:
    if invalidation_check.invalid_vts and not self.dry_run:
      invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]

      # The analysis for invalid and deleted sources is no longer valid.
      invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
      invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
      deleted_sources = self._get_deleted_sources()

      # Work in a tmpdir so we don't stomp the main analysis files on error.
      # The tmpdir is cleaned up in a shutdown hook, because background work
      # may need to access files we create here even after this method returns.
      self._ensure_analysis_tmpdir()
      tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
      os.mkdir(tmpdir)
      valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
      newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
      invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
      if ZincUtils.is_nonempty_analysis(self._analysis_file):
        with self.context.new_workunit(name='prepare-analysis'):
          if self._zinc_utils.run_zinc_split(self._analysis_file,
              ((invalid_sources + deleted_sources, newly_invalid_analysis_tmp),
               ([], valid_analysis_tmp))):
            raise TaskError('Failed to split off invalid analysis.')
          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            if self._zinc_utils.run_zinc_merge([self._invalid_analysis_file,
                                                newly_invalid_analysis_tmp],
                                               invalid_analysis_tmp):
              raise TaskError('Failed to merge prior and current invalid analysis.')
          else:
            invalid_analysis_tmp = newly_invalid_analysis_tmp

          # Now it's OK to overwrite the main analysis files with the new state.
          ZincUtils._move_analysis(valid_analysis_tmp, self._analysis_file)
          ZincUtils._move_analysis(invalid_analysis_tmp, self._invalid_analysis_file)

      # Figure out the sources and analysis belonging to each partition.
      partitions = []  # Each element is a triple (vts, sources, analysis_file).
      for vts in invalidation_check.invalid_vts_partitioned:
        partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
        os.mkdir(partition_tmpdir)
        sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
        analysis_file = os.path.join(partition_tmpdir, 'analysis')
        partitions.append((vts, sources, analysis_file))

      # Split per-partition files out of the global invalid analysis.
      if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
        with self.context.new_workunit(name='partition-analysis'):
          splits = [(x[1], x[2]) for x in partitions]
          if self._zinc_utils.run_zinc_split(self._invalid_analysis_file, splits):
            raise TaskError('Failed to split invalid analysis into per-partition files.')

      # Now compile partitions one by one.
      for partition in partitions:
        (vts, sources, analysis_file) = partition
        self._process_target_partition(partition, cp)
        # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
        if os.path.exists(analysis_file):  # The compilation created an analysis.
          # Kick off the background artifact cache write.
          if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
            self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

          # Merge the newly-valid analysis into our global valid analysis.
          if ZincUtils.is_nonempty_analysis(self._analysis_file):
            with self.context.new_workunit(name='update-upstream-analysis'):
              new_valid_analysis = analysis_file + '.valid.new'
              if self._zinc_utils.run_zinc_merge([self._analysis_file, analysis_file],
                                                 new_valid_analysis):
                raise TaskError('Failed to merge new analysis back into valid analysis file.')
              ZincUtils._move_analysis(new_valid_analysis, self._analysis_file)
          else:  # We need to keep analysis_file around. Background tasks may need it.
            ZincUtils._copy_analysis(analysis_file, self._analysis_file)

          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              if self._zinc_utils.run_zinc_split(self._invalid_analysis_file,
                  [(sources, discarded_invalid_analysis), ([], new_invalid_analysis)]):
                raise TaskError('Failed to trim invalid analysis file.')
              ZincUtils._move_analysis(new_invalid_analysis, self._invalid_analysis_file)

        # Now that all the analysis accounting is complete, we can safely mark the
        # targets as valid.
        vts.update()

      # Check for missing dependencies, if needed.
      if invalidation_check.invalid_vts and os.path.exists(self._analysis_file):
        deps_cache = JvmDependencyCache(self.context, scala_targets, self._analysis_file,
                                        self._classes_dir)
        deps_cache.check_undeclared_dependencies()

  # Provide the target->class and source->class mappings to downstream tasks if needed.
  if self.context.products.isrequired('classes'):
    sources_by_target = self._compute_sources_by_target(scala_targets)
    classes_by_source = self._compute_classes_by_source()
    self._add_all_products_to_genmap(sources_by_target, classes_by_source)

  # Update the classpath for downstream tasks.
  for conf in self._confs:
    egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
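Unlike the Analysis-based variants, this version shells out via run_zinc_split/run_zinc_merge and turns any nonzero return value into a TaskError. A rough sketch of that convention around a generic subprocess wrapper; run_tool and the placeholder command are hypothetical, not the actual ZincUtils invocation.

import subprocess

class TaskError(Exception):
  """Raised when an external tool invocation fails."""

def run_tool(args):
  """Run an external command and return its exit code (0 means success)."""
  return subprocess.call(args)

# The caller treats any nonzero exit code as failure, mirroring the
# `if self._zinc_utils.run_zinc_split(...): raise TaskError(...)` pattern above.
if run_tool(['echo', 'splitting analysis']):  # placeholder command, not the real zinc call
  raise TaskError('Failed to split analysis.')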