def task_generator(self):
    """Schedule seethrough-compare tasks for every processor, in order.

    One chunked job is emitted per (processor_spec, seethrough_limit)
    pair. All processors write into the same destination mask layer,
    so a scheduling barrier separates consecutive processors.
    """
    last_index = len(self.processor_spec) - 1
    for index, (spec, limit) in enumerate(
            zip(self.processor_spec, self.seethrough_limit)):
        task_factory = helpers.PartialSpecification(
            SeethroughCompareTask,
            processor_spec=spec,
            tgt_z_offset=self.tgt_z_offset,
            src_stack=self.src_stack,
            pad=self.pad,
            crop=self.crop,
            tgt_stack=self.tgt_stack,
            seethrough_limit=limit,
            pixel_offset_layer=self.pixel_offset_layer,
        )
        chunked_job = ChunkedJob(
            task_class=task_factory,
            dst_layer=self.dst_layer,
            chunk_xy=self.chunk_xy,
            chunk_z=1,
            mip=self.mip,
            bcube=self.bcube,
            suffix=self.suffix,
        )
        yield from chunked_job.task_generator
        # Each seethrough processor writes to the same mask layer, so we
        # wait for each processor to finish to avoid race conditions.
        if index != last_index:
            yield scheduling.wait_until_done
def task_generator(self):
    """Apply each processor to the destination layer, in order.

    A barrier separates consecutive processors (later processors may read
    what earlier ones wrote). Between stages, if the just-finished
    processor ran at a different MIP than the next one, a DownsampleJob
    brings the data to the next processor's MIP. After the last processor,
    if the first MIP is above the last, the result is propagated across
    the full MIP range so downstream consumers find fresh data everywhere.
    """
    for i in range(len(self.processor_spec)):
        this_proc = self.processor_spec[i]
        this_proc_mip = self.processor_mip[i]
        is_last_proc = i == len(self.processor_spec) - 1

        this_task = helpers.PartialSpecification(
            ApplyProcessorTask,
            src_stack=self.src_stack,
            processor_spec=this_proc,
            pad=self.pad,
            crop=self.crop,
        )
        chunked_job = ChunkedJob(
            task_class=this_task,
            dst_layer=self.dst_layer,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            blend_xy=self.blend_xy,
            mip=this_proc_mip,
            bcube=self.bcube,
        )
        yield from chunked_job.task_generator

        if not is_last_proc:
            # Wait so the next processor sees this processor's output.
            yield scheduling.wait_until_done
            next_proc_mip = self.processor_mip[i + 1]
            if this_proc_mip > next_proc_mip:
                downsample_job = DownsampleJob(
                    src_layer=self.dst_layer,
                    chunk_xy=self.chunk_xy,
                    chunk_z=self.chunk_z,
                    mip_start=this_proc_mip,
                    mip_end=next_proc_mip,
                    bcube=self.bcube,
                )
                yield from downsample_job.task_generator
                yield scheduling.wait_until_done

    if self.processor_mip[0] > self.processor_mip[-1]:
        # good manners -- prepare the ground for the next user by
        # propagating the result across the whole MIP range used.
        downsample_job = DownsampleJob(
            src_layer=self.dst_layer,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            mip_start=self.processor_mip[-1],
            mip_end=self.processor_mip[0],
            bcube=self.bcube,
        )
        # BUG FIX: this job used to be constructed and then discarded --
        # its tasks were never yielded, so the final downsample never ran.
        # Mirror the compute-field job: barrier first (so the last
        # processor's writes are visible), then schedule the downsample.
        yield scheduling.wait_until_done
        yield from downsample_job.task_generator
def task_generator(self):
    """Emit chunked field-inversion tasks covering the requested bcube."""
    invert_task = helpers.PartialSpecification(
        InvertFieldTask,
        src_layer=self.src_layer,
        pad=self.pad,
        crop=self.crop,
    )
    job = ChunkedJob(
        task_class=invert_task,
        dst_layer=self.dst_layer,
        chunk_xy=self.chunk_xy,
        chunk_z=self.chunk_z,
        blend_xy=self.blend_xy,
        mip=self.mip,
        bcube=self.bcube,
    )
    yield from job.task_generator
def task_generator(self):
    """Emit chunked section-comparison tasks (src vs tgt at tgt_z_offset)."""
    compare_task = helpers.PartialSpecification(
        CompareSectionsTask,
        processor_spec=self.processor_spec,
        tgt_z_offset=self.tgt_z_offset,
        src_stack=self.src_stack,
        pad=self.pad,
        crop=self.crop,
        tgt_stack=self.tgt_stack,
    )
    job = ChunkedJob(
        task_class=compare_task,
        dst_layer=self.dst_layer,
        chunk_xy=self.chunk_xy,
        chunk_z=1,
        mip=self.mip,
        bcube=self.bcube,
        suffix=self.suffix,
    )
    yield from job.task_generator
def task_generator(self):
    """Run the alignment processors in sequence to produce the final field.

    Each non-final processor writes its field into a freshly created
    intermediary layer attached to ``self.src_stack`` (so subsequent
    processors can compose with it); the last processor writes directly
    into ``self.dst_layer``. Between stages the field is downsampled when
    the next processor runs at a coarser MIP. Intermediary layers are
    detached from the source stack once the final field is scheduled.
    """
    intermediary_fields = []
    for i in range(len(self.processor_spec)):
        this_proc = self.processor_spec[i]
        this_proc_mip = self.processor_mip[i]
        # NOTE(review): unused until the vector_vote TODO below is implemented
        this_proc_vv = self.processor_vv[i]
        is_last_proc = i == len(self.processor_spec) - 1

        if is_last_proc:
            # if it's the last processor, the dst_layer is the final result
            proc_field_layer = self.dst_layer
        else:
            # if it's not the last processor, need to create an
            # intermediate field layer for this stage
            proc_field_layer_name = f'align_field_stage_{i}{self.suffix}'
            intermediary_fields.append(proc_field_layer_name)
            proc_field_layer = self.src_stack.create_sublayer(
                proc_field_layer_name,
                layer_type='field',
                overwrite=True)

            # In case this field is already written during previous runs,
            # disconnect it from the src_stack
            self.src_stack.remove_layer(proc_field_layer_name)

        # TODO: vector_vote
        #if this_proc_vv == 1:
        cf_task = helpers.PartialSpecification(
            ComputeFieldTask,
            src_stack=self.src_stack,
            tgt_stack=self.tgt_stack,
            processor_spec=this_proc,
            pad=self.pad,
            crop=self.crop,
            tgt_z_offset=self.tgt_z_offset,
            clear_nontissue_field=self.clear_nontissue_field)

        chunked_job = ChunkedJob(task_class=cf_task,
                                 dst_layer=proc_field_layer,
                                 chunk_xy=self.chunk_xy,
                                 chunk_z=self.chunk_z,
                                 blend_xy=self.blend_xy,
                                 mip=this_proc_mip,
                                 bcube=self.bcube,
                                 suffix=self.suffix)

        yield from chunked_job.task_generator

        if not is_last_proc:
            # Barrier: the next processor must see this stage's field.
            yield scheduling.wait_until_done

            # Now we're sure the proc_field_layer doesn't have stale data,
            # add it back
            self.src_stack.add_layer(proc_field_layer)

            # this processor's MIP has the freshest field
            proc_field_layer.data_mip = this_proc_mip

            next_proc_mip = self.processor_mip[i + 1]
            if this_proc_mip < next_proc_mip:
                # Next processor runs at a coarser MIP: downsample this
                # stage's field so the next stage can read it.
                downsample_job = DownsampleJob(src_layer=proc_field_layer,
                                               chunk_xy=self.chunk_xy,
                                               chunk_z=self.chunk_z,
                                               mip_start=this_proc_mip,
                                               mip_end=next_proc_mip,
                                               bcube=self.bcube)
                yield from downsample_job.task_generator
                yield scheduling.wait_until_done
                proc_field_layer.data_mip = next_proc_mip

    if self.processor_mip[0] > self.processor_mip[-1]:
        # good manners -- prepare the ground for the next user:
        # downsample the fields all the way down so that the next coarse
        # pass can start right away
        downsample_job = DownsampleJob(src_layer=self.dst_layer,
                                       chunk_xy=self.chunk_xy,
                                       chunk_z=self.chunk_z,
                                       mip_start=self.processor_mip[-1],
                                       mip_end=self.processor_mip[0],
                                       bcube=self.bcube)

        # Barrier before downsampling so the last stage's writes are visible.
        yield scheduling.wait_until_done
        yield from downsample_job.task_generator

        # field is fresh at all mip layers
        self.dst_layer.data_mip = None

    # Now that the final field is ready,
    # remove intermediary fields from the source stack
    for intermediary_field in intermediary_fields:
        self.src_stack.remove_layer(intermediary_field)