def task_generator(self):
    """Yield seethrough-compare tasks, one chunked pass per processor.

    Every processor writes into the same destination mask layer, so a
    wait barrier is yielded between consecutive processors to avoid
    race conditions.
    """
    last_index = len(self.processor_spec) - 1
    for index, proc_spec in enumerate(self.processor_spec):
        compare_task = helpers.PartialSpecification(
            SeethroughCompareTask,
            processor_spec=proc_spec,
            tgt_z_offset=self.tgt_z_offset,
            src_stack=self.src_stack,
            pad=self.pad,
            crop=self.crop,
            tgt_stack=self.tgt_stack,
            seethrough_limit=self.seethrough_limit[index],
            pixel_offset_layer=self.pixel_offset_layer,
        )
        job = ChunkedJob(
            task_class=compare_task,
            dst_layer=self.dst_layer,
            chunk_xy=self.chunk_xy,
            chunk_z=1,
            mip=self.mip,
            bcube=self.bcube,
            suffix=self.suffix,
        )
        yield from job.task_generator
        # Serialize processors: all of them target the same mask layer.
        if index < last_index:
            yield scheduling.wait_until_done
def task_generator(self):
    """Yield tasks that apply each processor to the source stack in order.

    Each processor runs at its own MIP (``self.processor_mip[i]``) and
    writes into ``self.dst_layer``.  A wait barrier separates consecutive
    processors; when consecutive processors run at different MIPs, the
    destination layer is resampled in between so the next processor reads
    fresh data at its own MIP.  After the last processor, if the first MIP
    is coarser than the last one, the result is propagated across the full
    MIP range for downstream consumers.
    """
    for i, this_proc in enumerate(self.processor_spec):
        this_proc_mip = self.processor_mip[i]
        is_last_proc = i == len(self.processor_spec) - 1

        this_task = helpers.PartialSpecification(
            ApplyProcessorTask,
            src_stack=self.src_stack,
            processor_spec=this_proc,
            pad=self.pad,
            crop=self.crop,
        )
        chunked_job = ChunkedJob(
            task_class=this_task,
            dst_layer=self.dst_layer,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            blend_xy=self.blend_xy,
            mip=this_proc_mip,
            bcube=self.bcube,
        )
        yield from chunked_job.task_generator

        if not is_last_proc:
            # Wait for this processor to finish before resampling or
            # starting the next one.
            yield scheduling.wait_until_done
            next_proc_mip = self.processor_mip[i + 1]
            if this_proc_mip > next_proc_mip:
                downsample_job = DownsampleJob(
                    src_layer=self.dst_layer,
                    chunk_xy=self.chunk_xy,
                    chunk_z=self.chunk_z,
                    mip_start=this_proc_mip,
                    mip_end=next_proc_mip,
                    bcube=self.bcube,
                )
                yield from downsample_job.task_generator
                yield scheduling.wait_until_done

    if self.processor_mip[0] > self.processor_mip[-1]:
        # good manners
        # prepare the ground for the next you
        downsample_job = DownsampleJob(
            src_layer=self.dst_layer,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            mip_start=self.processor_mip[-1],
            mip_end=self.processor_mip[0],
            bcube=self.bcube,
        )
        # BUG FIX: the job above was constructed but its tasks were never
        # yielded, so the final cross-MIP resample silently never ran.
        yield from downsample_job.task_generator
def task_generator(self):
    """Chunk a single field-inversion pass over the bounding cube."""
    invert_task = helpers.PartialSpecification(
        InvertFieldTask,
        src_layer=self.src_layer,
        pad=self.pad,
        crop=self.crop,
    )
    job = ChunkedJob(
        task_class=invert_task,
        dst_layer=self.dst_layer,
        chunk_xy=self.chunk_xy,
        chunk_z=self.chunk_z,
        blend_xy=self.blend_xy,
        mip=self.mip,
        bcube=self.bcube,
    )
    yield from job.task_generator
def task_generator(self):
    """Chunk a single section-comparison pass over the bounding cube."""
    compare_task = helpers.PartialSpecification(
        CompareSectionsTask,
        processor_spec=self.processor_spec,
        tgt_z_offset=self.tgt_z_offset,
        src_stack=self.src_stack,
        pad=self.pad,
        crop=self.crop,
        tgt_stack=self.tgt_stack,
    )
    job = ChunkedJob(
        task_class=compare_task,
        dst_layer=self.dst_layer,
        chunk_xy=self.chunk_xy,
        chunk_z=1,
        mip=self.mip,
        bcube=self.bcube,
        suffix=self.suffix,
    )
    yield from job.task_generator
def task_generator(self):
    """Yield tasks that compute the alignment field by chaining processors.

    Each processor runs at its own MIP.  Non-final processors write into
    freshly (re)created intermediate field sublayers of ``src_stack`` so
    the next processor can read them; the last processor writes directly
    into ``self.dst_layer``.  Wait barriers separate stages, and fields
    are downsampled between stages when the next processor runs at a
    coarser MIP.
    """
    intermediary_fields = []
    for i in range(len(self.processor_spec)):
        this_proc = self.processor_spec[i]
        this_proc_mip = self.processor_mip[i]
        # NOTE(review): only referenced by the commented-out vector-vote
        # branch below; kept for when that TODO is implemented.
        this_proc_vv = self.processor_vv[i]
        is_last_proc = i == len(self.processor_spec) - 1
        if is_last_proc:
            # if it's the last processor, the dst_layer is the final result
            proc_field_layer = self.dst_layer
        else:
            # if it's not the last processor, need to create an intermediate layer
            proc_field_layer_name = f'align_field_stage_{i}{self.suffix}'
            intermediary_fields.append(proc_field_layer_name)
            proc_field_layer = self.src_stack.create_sublayer(
                proc_field_layer_name,
                layer_type='field',
                overwrite=True)
            # In case this field was already written during previous runs,
            # disconnect it from the src_stack until it holds fresh data.
            self.src_stack.remove_layer(proc_field_layer_name)

        # TODO: vector_vote
        #if this_proc_vv == 1:
        cf_task = helpers.PartialSpecification(
            ComputeFieldTask,
            src_stack=self.src_stack,
            tgt_stack=self.tgt_stack,
            processor_spec=this_proc,
            pad=self.pad,
            crop=self.crop,
            tgt_z_offset=self.tgt_z_offset,
            clear_nontissue_field=self.clear_nontissue_field)

        chunked_job = ChunkedJob(task_class=cf_task,
                                 dst_layer=proc_field_layer,
                                 chunk_xy=self.chunk_xy,
                                 chunk_z=self.chunk_z,
                                 blend_xy=self.blend_xy,
                                 mip=this_proc_mip,
                                 bcube=self.bcube,
                                 suffix=self.suffix)
        yield from chunked_job.task_generator

        if not is_last_proc:
            yield scheduling.wait_until_done
            # Now we're sure the proc_field_layer doesn't have stale data,
            # add it back so the next stage can read it.
            self.src_stack.add_layer(proc_field_layer)
            # this processor's MIP has the freshest field
            proc_field_layer.data_mip = this_proc_mip
            next_proc_mip = self.processor_mip[i + 1]
            if this_proc_mip < next_proc_mip:
                # Next stage is coarser: downsample the fresh field to its MIP.
                downsample_job = DownsampleJob(src_layer=proc_field_layer,
                                               chunk_xy=self.chunk_xy,
                                               chunk_z=self.chunk_z,
                                               mip_start=this_proc_mip,
                                               mip_end=next_proc_mip,
                                               bcube=self.bcube)
                yield from downsample_job.task_generator
                yield scheduling.wait_until_done
                proc_field_layer.data_mip = next_proc_mip

    if self.processor_mip[0] > self.processor_mip[-1]:
        # good manners
        # prepare the ground for the next you
        # downsample the fields all the way down so that the next coarse pass can start right away
        downsample_job = DownsampleJob(src_layer=self.dst_layer,
                                       chunk_xy=self.chunk_xy,
                                       chunk_z=self.chunk_z,
                                       mip_start=self.processor_mip[-1],
                                       mip_end=self.processor_mip[0],
                                       bcube=self.bcube)
        yield scheduling.wait_until_done
        yield from downsample_job.task_generator
        # field is fresh at all mip levels
        self.dst_layer.data_mip = None

    # Now that the final field is ready,
    # remove intermediary fields from the source stack
    for intermediary_field in intermediary_fields:
        self.src_stack.remove_layer(intermediary_field)
def align(
    ctx,
    src_layer_spec,
    dst_folder,
    render_pad,
    render_chunk_xy,
    processor_spec,
    pad,
    crop,
    processor_mip,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    bad_starter_path,
    block_size,
    stitch_size,
    vote_dist,
    consensus_threshold,
    blur_sigma,
    kernel_size,
    blend_xy,
    force_chunk_xy,
    suffix,
    seethrough_spec,
    seethrough_limit,
    seethrough_spec_mip,
    decay_dist,
    blur_rate,
    restart_stage,
    restart_suffix,
):
    """Block-based alignment pipeline, restartable at intermediate stages.

    Stages (gated by ``restart_stage``):
      0. Align each block independently (even/odd destination stacks).
      1. Align the overlap ("stitch") blocks between neighbors.
      2. Vote over stitch fields (if ``stitch_size > 1``) and downsample them.
      3. Broadcast block + stitch fields into one composed field.
      4. Render the final image with the composed field.
    """
    scheduler = ctx.obj["scheduler"]
    if suffix is None:
        suffix = "_aligned"
    else:
        suffix = f"_{suffix}"
    # A restart before stage 1 writes everything fresh, so the restart
    # suffix collapses onto the normal suffix.
    if (restart_suffix is None) or (restart_stage == 0):
        restart_suffix = suffix

    if crop is None:
        crop = pad
    corgie_logger.debug("Setting up layers...")
    # TODO: store stitching images in layer other than even & odd

    # Voting + stitching both consume sections at the block edge; the block
    # must be big enough to hold them.
    if vote_dist + stitch_size - 2 >= block_size:
        raise exceptions.CorgieException(
            "block_size too small for stitching + voting requirements (stitch_size + vote_dist)"
        )

    # NOTE(review): duplicate of the debug line above — harmless.
    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec, name="src", readonly=True)
    src_stack.folder = dst_folder

    if force_chunk_xy is None:
        force_chunk_xy = chunk_xy

    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=restart_suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    # Alternating blocks land in "even" / "odd" stacks so neighbors never
    # write to the same layers.
    even_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=os.path.join(dst_folder, "even"),
        name="even",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    odd_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=os.path.join(dst_folder, "odd"),
        name="odd",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    corgie_logger.debug("Done!")

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    corgie_logger.debug("Calculating blocks...")
    # Sections that must not start a block; one integer z per line.
    skip_list = []
    if bad_starter_path is not None:
        with open(bad_starter_path) as f:
            line = f.readline()
            while line:
                skip_list.append(int(line))
                line = f.readline()
    blocks = get_blocks(
        start=bcube.z_range()[0],
        stop=bcube.z_range()[1],
        block_size=block_size,
        block_overlap=1,
        skip_list=skip_list,
        src_stack=src_stack,
        even_stack=even_stack,
        odd_stack=odd_stack,
    )
    # One stitch block per block boundary (none before the first block).
    stitch_blocks = [b.overlap(stitch_size) for b in blocks[1:]]
    corgie_logger.debug("All Blocks")
    for block, stitch_block in zip(blocks, [None] + stitch_blocks):
        corgie_logger.debug(block)
        corgie_logger.debug(f"Stitch {stitch_block}")
        corgie_logger.debug("\n")

    max_blur_mip = (math.ceil(math.log(decay_dist * blur_rate + 1, 2)) +
                    processor_mip[-1])
    corgie_logger.debug(f"Max blur mip for stitching field: {max_blur_mip}")

    # Set all field names, adjusting for restart suffix
    block_field_name = f"field{suffix}"
    stitch_estimated_suffix = f"_stitch_estimated{suffix}"
    stitch_estimated_name = f"field{stitch_estimated_suffix}"
    stitch_corrected_name = f"stitch_corrected{suffix}"
    stitch_corrected_field = None
    composed_name = f"composed{suffix}"
    # Stages that will be re-run in this invocation get the restart suffix.
    if restart_stage <= 2:
        stitch_estimated_suffix = f"_stitch_estimated{restart_suffix}"
        stitch_estimated_name = f"field{stitch_estimated_suffix}"
        stitch_corrected_name = f"stitch_corrected{restart_suffix}"
    if restart_stage <= 3:
        composed_name = f"composed{restart_suffix}"

    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=render_pad,
        chunk_xy=render_chunk_xy,
        chunk_z=1,
        render_masks=False,
    )

    cf_method = helpers.PartialSpecification(
        f=ComputeFieldJob,
        pad=pad,
        crop=crop,
        processor_mip=processor_mip,
        processor_spec=processor_spec,
        chunk_xy=chunk_xy,
        blend_xy=blend_xy,
        chunk_z=1,
    )
    if seethrough_spec != tuple():
        assert seethrough_spec_mip is not None

        seethrough_method = helpers.PartialSpecification(
            f=SeethroughCompareJob,
            mip=seethrough_spec_mip,
            processor_spec=seethrough_spec,
            chunk_xy=chunk_xy,
            pad=pad,
            crop=pad,
            seethrough_limit=seethrough_limit,
        )
    else:
        seethrough_method = None

    #restart_stage = 4
    #import pdb; pdb.set_trace()

    if restart_stage == 0:
        corgie_logger.debug("Aligning blocks...")
        for block in blocks:
            block_bcube = block.get_bcube(bcube)
            # Use copies of src & dst so that aligning the stitching blocks
            # is not affected by these block fields.
            # Copying also allows local compute to not modify objects for other tasks
            align_block_job_forv = AlignBlockJob(
                src_stack=deepcopy(block.src_stack),
                dst_stack=deepcopy(block.dst_stack),
                bcube=block_bcube,
                render_method=render_method,
                cf_method=cf_method,
                vote_dist=vote_dist,
                seethrough_method=seethrough_method,
                suffix=suffix,
                copy_start=True,
                use_starters=True,
                backward=False,
                consensus_threshold=consensus_threshold,
                blur_sigma=blur_sigma,
                kernel_size=kernel_size,
            )
            scheduler.register_job(
                align_block_job_forv,
                job_name=f"Forward Align {block} {block_bcube}",
            )
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    if restart_stage <= 1:
        corgie_logger.debug("Aligning stitching blocks...")
        for stitch_block in stitch_blocks:
            block_bcube = stitch_block.get_bcube(bcube)
            # These blocks will have block-aligned images, but not
            # the block_fields that warped them.
            align_block_job_forv = AlignBlockJob(
                src_stack=deepcopy(stitch_block.src_stack),
                dst_stack=deepcopy(stitch_block.dst_stack),
                bcube=block_bcube,
                render_method=render_method,
                cf_method=cf_method,
                vote_dist=vote_dist,
                seethrough_method=seethrough_method,
                suffix=stitch_estimated_suffix,
                copy_start=False,
                use_starters=False,
                backward=False,
                consensus_threshold=consensus_threshold,
                blur_sigma=blur_sigma,
                kernel_size=kernel_size,
            )
            scheduler.register_job(
                align_block_job_forv,
                job_name=f"Stitch Align {stitch_block} {block_bcube}",
            )
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    # Add in the stitch_estimated fields that were just created above
    even_stack.create_sublayer(
        stitch_estimated_name,
        layer_type="field",
        overwrite=False,
    )
    odd_stack.create_sublayer(
        stitch_estimated_name,
        layer_type="field",
        overwrite=False,
    )
    if restart_stage <= 2:
        if stitch_size > 1:
            corgie_logger.debug("Voting over stitching blocks")
            stitch_corrected_field = dst_stack.create_sublayer(
                stitch_corrected_name, layer_type="field", overwrite=True)
            for stitch_block in stitch_blocks:
                stitch_estimated_field = stitch_block.dst_stack[
                    stitch_estimated_name]
                # Vote writes into the single starter section of the block.
                block_bcube = bcube.reset_coords(
                    zs=stitch_block.start,
                    ze=stitch_block.start + 1,
                    in_place=False,
                )
                z_offsets = [
                    z - block_bcube.z_range()[0]
                    for z in range(stitch_block.start, stitch_block.stop)
                ]
                vote_stitch_job = VoteJob(
                    input_fields=[stitch_estimated_field],
                    output_field=stitch_corrected_field,
                    chunk_xy=chunk_xy,
                    bcube=block_bcube,
                    z_offsets=z_offsets,
                    mip=processor_mip[-1],
                    consensus_threshold=consensus_threshold,
                    blur_sigma=blur_sigma,
                    kernel_size=kernel_size,
                )
                scheduler.register_job(
                    vote_stitch_job,
                    job_name=f"Stitching Vote {stitch_block} {block_bcube}",
                )
            scheduler.execute_until_completion()
            corgie_logger.debug("Done!")

        for stitch_block in stitch_blocks:
            block_bcube = bcube.reset_coords(zs=stitch_block.start,
                                             ze=stitch_block.start + 1,
                                             in_place=False)
            field_to_downsample = stitch_block.dst_stack[stitch_estimated_name]
            if stitch_corrected_field is not None:
                field_to_downsample = stitch_corrected_field
            # Hack for fafb
            field_info = field_to_downsample.get_info()
            for scale in field_info['scales']:
                scale['chunk_sizes'][-1][-1] = 1
                scale['encoding'] = 'raw'
            field_to_downsample.cv.store_info(field_info)
            field_to_downsample.cv.fetch_info()
            downsample_field_job = DownsampleJob(
                src_layer=field_to_downsample,
                mip_start=processor_mip[-1],
                mip_end=max_blur_mip,
                bcube=block_bcube,
                chunk_xy=
                chunk_xy,  # TODO: This probably needs to be modified at highest mips
                chunk_z=1,
                mips_per_task=2,
            )
            scheduler.register_job(
                downsample_field_job,
                job_name=f"Downsample stitching field {block_bcube}",
            )
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    # Add in the block-align fields
    even_stack.create_sublayer(
        block_field_name,
        layer_type="field",
        overwrite=False,
    )
    odd_stack.create_sublayer(
        block_field_name,
        layer_type="field",
        overwrite=False,
    )
    composed_field = dst_stack.create_sublayer(composed_name,
                                               layer_type="field",
                                               overwrite=True)
    # When restarting past stage 2, the corrected field already exists on
    # disk; re-attach it without overwriting.
    if (restart_stage > 2) and (stitch_size > 1):
        stitch_corrected_field = dst_stack.create_sublayer(
            stitch_corrected_name, layer_type="field", overwrite=False)
    if restart_stage <= 3:
        corgie_logger.debug("Stitching blocks...")
        for block, stitch_block in zip(blocks[1:], stitch_blocks):
            block_bcube = block.broadcastable().get_bcube(bcube)
            block_list = block.get_neighbors(dist=decay_dist)
            corgie_logger.debug(f"src_block: {block}")
            corgie_logger.debug(f"influencing blocks: {block_list}")
            z_list = [b.stop for b in block_list]
            # stitch_corrected_field used if there is multi-section block overlap,
            # which requires voting to produce a corrected field.
            # If there is only single-section block overlap, then use
            # stitch_estimated_fields from each stitch_block
            if stitch_corrected_field is not None:
                stitching_fields = [stitch_corrected_field]
            else:
                # Order with furthest block first (convention of FieldSet).
                stitching_fields = [
                    stitch_block.dst_stack[stitch_estimated_name],
                    stitch_block.src_stack[stitch_estimated_name],
                ]

            broadcast_job = BroadcastJob(
                block_field=block.dst_stack[block_field_name],
                stitching_fields=stitching_fields,
                output_field=composed_field,
                chunk_xy=chunk_xy,
                bcube=block_bcube,
                pad=pad,
                z_list=z_list,
                mip=processor_mip[-1],
                decay_dist=decay_dist,
                blur_rate=blur_rate,
            )
            scheduler.register_job(broadcast_job,
                                   job_name=f"Broadcast {block} {block_bcube}")
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

        # The first block has no stitching; its raw block field is the
        # composed field.
        if len(blocks) > 1:
            block_bcube = blocks[0].get_bcube(bcube)
            copy_job = CopyLayerJob(
                src_layer=even_stack[block_field_name],
                dst_layer=composed_field,
                mip=processor_mip[-1],
                bcube=block_bcube,
                chunk_xy=chunk_xy,
                chunk_z=1,
            )
            scheduler.register_job(
                copy_job,
                job_name=f"Copy first block_field to composed_field location")
            scheduler.execute_until_completion()
            corgie_logger.debug("Done!")

    if restart_stage <= 4:
        if len(blocks) == 1:
            block_bcube = blocks[0].get_bcube(bcube)
            render_job = RenderJob(
                src_stack=src_stack,
                dst_stack=dst_stack,
                mips=processor_mip[-1],
                pad=pad,
                bcube=block_bcube,
                chunk_xy=chunk_xy,
                chunk_z=1,
                render_masks=True,
                blackout_masks=False,
                additional_fields=[even_stack[block_field_name]],
            )
            scheduler.register_job(
                render_job, job_name=f"Render first block {block_bcube}")
        else:
            block_bcube = bcube.reset_coords(zs=blocks[0].start,
                                             ze=blocks[-1].stop,
                                             in_place=False)
            render_job = RenderJob(
                src_stack=src_stack,
                dst_stack=dst_stack,
                mips=processor_mip[-1],
                pad=pad,
                bcube=block_bcube,
                chunk_xy=chunk_xy,
                chunk_z=1,
                render_masks=True,
                blackout_masks=True,
                additional_fields=[composed_field],
            )
            scheduler.register_job(render_job,
                                   job_name=f"Render all blocks {block_bcube}")
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    result_report = (
        f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}")
    corgie_logger.info(result_report)
def align_block(
    ctx,
    src_layer_spec,
    dst_folder,
    vote_dist,
    render_pad,
    render_chunk_xy,
    processor_spec,
    pad,
    crop,
    processor_mip,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    blend_xy,
    force_chunk_xy,
    suffix,
    copy_start,
    use_starters,
    seethrough_spec,
    seethrough_limit,
    seethrough_spec_mip,
    mode,
    chunk_z=1,
):
    """Align a single block of sections.

    ``mode`` selects the sweep direction: "bidirectional" splits the z-range
    at its midpoint and runs a backward job on the lower half and a forward
    job on the upper half; "backward" runs a single backward job; anything
    else runs a single forward job.
    """
    scheduler = ctx.obj["scheduler"]

    if suffix is None:
        suffix = "_aligned"
    else:
        suffix = f"_{suffix}"

    if crop is None:
        crop = pad

    corgie_logger.debug("Setting up layers...")

    src_stack = create_stack_from_spec(src_layer_spec, name="src", readonly=True)
    src_stack.folder = dst_folder

    # NOTE(review): force_chunk_xy seems to be treated as a boolean flag
    # here (truthy -> use chunk_xy), unlike other commands in this file
    # where it carries the chunk size itself — confirm against the CLI
    # option declaration.
    force_chunk_xy = chunk_xy if force_chunk_xy else None

    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=render_pad,
        chunk_xy=render_chunk_xy,
        chunk_z=1,
        render_masks=False,
    )

    if seethrough_spec != tuple():
        assert seethrough_spec_mip is not None

        seethrough_method = helpers.PartialSpecification(
            f=SeethroughCompareJob,
            mip=seethrough_spec_mip,
            processor_spec=seethrough_spec,
            chunk_xy=chunk_xy,
            pad=pad,
            crop=pad,
            seethrough_limit=seethrough_limit,
        )
    else:
        seethrough_method = None

    cf_method = helpers.PartialSpecification(
        f=ComputeFieldJob,
        pad=pad,
        crop=crop,
        processor_mip=processor_mip,
        processor_spec=processor_spec,
        chunk_xy=chunk_xy,
        blend_xy=blend_xy,
        chunk_z=1,
    )

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    if mode == "bidirectional":
        z_mid = (bcube.z_range()[1] + bcube.z_range()[0]) // 2
        bcube_back = bcube.reset_coords(ze=z_mid, in_place=False)
        bcube_forv = bcube.reset_coords(zs=z_mid, in_place=False)

        align_block_job_back = AlignBlockJob(
            src_stack=src_stack,
            dst_stack=dst_stack,
            bcube=bcube_back,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=copy_start,
            backward=True,
            vote_dist=vote_dist,
            use_starters=use_starters,
        )
        scheduler.register_job(
            align_block_job_back,
            job_name="Backward Align Block {}".format(bcube),
        )

        # Forward half gets a deepcopy of dst_stack so the two jobs do not
        # share mutable stack state.
        align_block_job_forv = AlignBlockJob(
            src_stack=src_stack,
            dst_stack=deepcopy(dst_stack),
            bcube=bcube_forv,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=True,
            backward=False,
            vote_dist=vote_dist,
            use_starters=use_starters,
        )
        scheduler.register_job(
            align_block_job_forv,
            job_name="Forward Align Block {}".format(bcube),
        )
    else:
        align_block_job = AlignBlockJob(
            src_stack=src_stack,
            dst_stack=dst_stack,
            bcube=bcube,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=copy_start,
            backward=mode == "backward",
            vote_dist=vote_dist,
            use_starters=use_starters,
        )

        # create scheduler and execute the job
        scheduler.register_job(align_block_job,
                               job_name="Align Block {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = (
        f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}"
    )
    corgie_logger.info(result_report)
def align_block(ctx, src_layer_spec, tgt_layer_spec, dst_folder, render_pad,
                render_chunk_xy, processor_spec, pad, crop, processor_mip,
                chunk_xy, start_coord, end_coord, coord_mip, suffix,
                copy_start, mode, chunk_z=1):
    """Align a single block of sections against an explicit target stack.

    Older variant of ``align_block`` that takes a separate ``tgt_layer_spec``.
    ``mode`` selects the sweep: "bidirectional" splits the z-range at its
    midpoint (backward job below, forward job above); "backward" runs one
    backward job; anything else runs one forward job.
    """
    scheduler = ctx.obj['scheduler']

    if suffix is None:
        suffix = '_aligned'
    else:
        suffix = f"_{suffix}"

    if crop is None:
        crop = pad

    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
                                       name='src',
                                       readonly=True)

    tgt_stack = create_stack_from_spec(tgt_layer_spec,
                                       name='tgt',
                                       readonly=True,
                                       reference=src_stack)

    dst_stack = stack.create_stack_from_reference(reference_stack=src_stack,
                                                  folder=dst_folder,
                                                  name="dst",
                                                  types=["img", "mask"],
                                                  readonly=False,
                                                  suffix=suffix)

    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=render_pad,
        chunk_xy=render_chunk_xy,
        chunk_z=1,
        blackout_masks=False,
        render_masks=True,
        mip=min(processor_mip)
    )

    cf_method = helpers.PartialSpecification(
        f=ComputeFieldJob,
        pad=pad,
        crop=crop,
        processor_mip=processor_mip,
        processor_spec=processor_spec,
        chunk_xy=chunk_xy,
        chunk_z=1
    )

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    if mode == 'bidirectional':
        z_mid = (bcube.z_range()[1] + bcube.z_range()[0]) // 2
        bcube_back = bcube.reset_coords(ze=z_mid, in_place=False)
        bcube_forv = bcube.reset_coords(zs=z_mid, in_place=False)

        align_block_job_back = AlignBlockJob(src_stack=src_stack,
                                             tgt_stack=tgt_stack,
                                             dst_stack=dst_stack,
                                             bcube=bcube_back,
                                             render_method=render_method,
                                             cf_method=cf_method,
                                             suffix=suffix,
                                             copy_start=copy_start,
                                             backward=True)
        scheduler.register_job(align_block_job_back,
                               job_name="Backward Align Block {}".format(bcube))

        # Forward half gets a deepcopy of dst_stack so the two jobs do not
        # share mutable stack state.
        align_block_job_forv = AlignBlockJob(src_stack=src_stack,
                                             tgt_stack=tgt_stack,
                                             dst_stack=deepcopy(dst_stack),
                                             bcube=bcube_forv,
                                             render_method=render_method,
                                             cf_method=cf_method,
                                             suffix=suffix,
                                             copy_start=True,
                                             backward=False)
        scheduler.register_job(align_block_job_forv,
                               job_name="Forward Align Block {}".format(bcube))
    else:
        align_block_job = AlignBlockJob(src_stack=src_stack,
                                        tgt_stack=tgt_stack,
                                        dst_stack=dst_stack,
                                        bcube=bcube,
                                        render_method=render_method,
                                        cf_method=cf_method,
                                        suffix=suffix,
                                        copy_start=copy_start,
                                        backward=mode=='backward')

        # create scheduler and execute the job
        scheduler.register_job(align_block_job,
                               job_name="Align Block {}".format(bcube))

    scheduler.execute_until_completion()

    result_report = f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. " \
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}"
    corgie_logger.info(result_report)
def seethrough_block(
    ctx,
    src_layer_spec,
    dst_folder,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    suffix,
    seethrough_spec,
    seethrough_limit,
    seethrough_spec_mip,
    force_chunk_z=1,
):
    """Set up source/destination stacks and run a SeethroughBlockJob
    over the requested coordinate range, then report the result layers."""
    scheduler = ctx.obj["scheduler"]
    suffix = "_seethrough" if suffix is None else f"_{suffix}"
    # No padding or cropping is applied in this command.
    pad = 0
    crop = 0

    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec, name="src", readonly=True)
    src_stack.folder = dst_folder
    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        overwrite=True,
        force_chunk_z=force_chunk_z,
    )

    render_spec = helpers.PartialSpecification(
        f=RenderJob,
        pad=pad,
        chunk_xy=chunk_xy,
        chunk_z=1,
        render_masks=False,
    )
    compare_spec = helpers.PartialSpecification(
        f=SeethroughCompareJob,
        mip=seethrough_spec_mip,
        processor_spec=seethrough_spec,
        chunk_xy=chunk_xy,
        pad=pad,
        crop=pad,
        seethrough_limit=seethrough_limit,
    )

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)
    block_job = SeethroughBlockJob(
        src_stack=src_stack,
        dst_stack=dst_stack,
        bcube=bcube,
        render_method=render_spec,
        seethrough_method=compare_spec,
        suffix=suffix,
    )

    # Register and run the job to completion.
    scheduler.register_job(block_job,
                           job_name="Seethrough Block {}".format(bcube))
    scheduler.execute_until_completion()

    result_report = (
        f"Rendered layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}")
    corgie_logger.info(result_report)
def align(ctx, src_layer_spec, tgt_layer_spec, dst_folder, render_pad,
          render_chunk_xy, processor_spec, pad, crop, processor_mip,
          chunk_xy, start_coord, end_coord, coord_mip, bad_starter_path,
          block_size, block_overlap, blend_xy, force_chunk_xy, suffix,
          copy_start, seethrough_spec, seethrough_spec_mip):
    """Older block-based alignment command.

    Partitions the z-range into overlapping blocks, aligns each block
    forward into alternating even/odd destination stacks, and leaves
    block stitching as a TODO (commented out below).
    """
    scheduler = ctx.obj['scheduler']

    if suffix is None:
        suffix = '_aligned'
    else:
        suffix = f"_{suffix}"

    if crop is None:
        crop = pad

    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
                                       name='src',
                                       readonly=True)
    src_stack.folder = dst_folder

    tgt_stack = create_stack_from_spec(tgt_layer_spec,
                                       name='tgt',
                                       readonly=True,
                                       reference=src_stack)

    if force_chunk_xy is None:
        force_chunk_xy = chunk_xy

    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True)

    # Alternating blocks land in "even" / "odd" stacks so that neighboring
    # blocks never write into the same layers.
    even_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=os.path.join(dst_folder, 'even'),
        name="even",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True)

    odd_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=os.path.join(dst_folder, 'odd'),
        name="odd",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True)
    corgie_logger.debug("Done!")

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    corgie_logger.debug("Calculating blocks...")
    # TODO: read in bad starter sections
    bad_starter_sections = []

    # Greedily grow each block past bad starter sections so no block
    # begins on one.
    blocks = []
    z = bcube.z_range()[0]
    while z < bcube.z_range()[-1]:
        block_start = z
        block_end = z + block_size
        while block_end + block_overlap in bad_starter_sections and \
                block_end + block_overlap < bcube.z_range()[-1]:
            block_end += 1
        block = Block(block_start, block_end + block_overlap)
        blocks.append(block)
        z = block_end

    corgie_logger.debug("Done!")

    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=render_pad,
        chunk_xy=render_chunk_xy,
        chunk_z=1,
        render_masks=False,
    )

    cf_method = helpers.PartialSpecification(f=ComputeFieldJob,
                                             pad=pad,
                                             crop=crop,
                                             processor_mip=processor_mip,
                                             processor_spec=processor_spec,
                                             chunk_xy=chunk_xy,
                                             blend_xy=blend_xy,
                                             chunk_z=1)
    if seethrough_spec is not None:
        assert seethrough_spec_mip is not None

        seethrough_method = helpers.PartialSpecification(
            f=CompareSectionsJob,
            mip=seethrough_spec_mip,
            processor_spec=seethrough_spec,
            chunk_xy=chunk_xy,
            pad=pad,
            crop=pad,
        )
    else:
        seethrough_method = None

    corgie_logger.debug("Aligning blocks...")
    for i in range(len(blocks)):
        block = blocks[i]
        block_bcube = bcube.copy()
        block_bcube.reset_coords(zs=block.z_start, ze=block.z_end)

        if i % 2 == 0:
            block_dst_stack = even_stack
        else:
            block_dst_stack = odd_stack

        align_block_job_forv = AlignBlockJob(
            src_stack=src_stack,
            tgt_stack=tgt_stack,
            dst_stack=block_dst_stack,
            bcube=block_bcube,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=copy_start,
            backward=False)
        scheduler.register_job(align_block_job_forv,
                               job_name=f"Forward Align {block} {block_bcube}")

    scheduler.execute_until_completion()
    corgie_logger.debug("Done!")

    corgie_logger.debug("Stitching blocks...")
    #TODO
    #stitch_blocks_job = StitchBlockJob(
    #    blocks=blocks,
    #    src_stack=src_stack,
    #    dst_stack=dst_stack,
    #    bcube=bcube,
    #    suffix=suffix,
    #    render_method=render_method,
    #    cf_method=cf_method
    #)
    #scheduler.register_job(stitch_blocks_job, job_name=f"Stitch blocks {bcube}")
    #scheduler.execute_until_completion()
    corgie_logger.debug("Done!")

    result_report = f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. " \
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}"
    corgie_logger.info(result_report)