Code Example #1
File: create_skeletons.py  Project: seung-lab/corgie
def create_skeletons(
    ctx,
    seg_layer_spec,
    dst_folder,
    timestamp,
    mip,
    teasar_scale,
    teasar_const,
    ids,
    ids_filepath,
    tick_threshold,
    chunk_xy,
    chunk_z,
    single_merge_mode,
    start_coord,
    end_coord,
    coord_mip,
):
    scheduler = ctx.obj["scheduler"]

    corgie_logger.debug("Setting up layers...")
    seg_stack = create_stack_from_spec(seg_layer_spec,
                                       name="src",
                                       readonly=True)
    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    object_ids = ids
    if ids_filepath is not None:
        object_ids = []
        with open(ids_filepath, "r") as f:
            line = f.readline()
            while line:
                object_ids.append(int(line))
                line = f.readline()
    if object_ids is None or len(object_ids) == 0:
        raise ValueError("Must specify ids to skeletonize")
    object_ids = list(object_ids)
    teasar_params = {"scale": teasar_scale, "const": teasar_const}

    seg_layer = seg_stack.get_layers_of_type("segmentation")[0]
    skeleton_job = SkeletonJob(
        seg_layer=seg_layer,
        dst_path=dst_folder,
        timestamp=timestamp,
        bcube=bcube,
        chunk_xy=chunk_xy,
        chunk_z=chunk_z,
        mip=mip,
        teasar_params=teasar_params,
        object_ids=object_ids,
        tick_threshold=tick_threshold,
        single_merge_mode=single_merge_mode,
    )

    scheduler.register_job(skeleton_job,
                           job_name="Skeletonize {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = f"Skeletonized {str(seg_layer)}. "
    corgie_logger.info(result_report)
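The `ids_filepath` loop above expects a plain-text file with one integer segment id per line. A minimal sketch of writing and reading such a file (the path and ids here are hypothetical):

# Hypothetical ids file: one integer segment id per line.
with open("object_ids.txt", "w") as f:
    f.write("648518346349539853\n648518346349538466\n")

with open("object_ids.txt") as f:
    object_ids = [int(line) for line in f if line.strip()]
# object_ids == [648518346349539853, 648518346349538466]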
Code Example #2
File: create_skeletons.py  Project: seung-lab/corgie
    def execute(self):
        corgie_logger.info(f"Merging skeletons at {self.dst_path}")
        fragment_filenames = self.cf.list(prefix=self.prefix, flat=True)
        skeleton_files = self.cf.get(fragment_filenames)
        skeletons = defaultdict(list)
        for skeleton_file in skeleton_files:
            try:
                colon_index = skeleton_file["path"].index(":")
            except ValueError:
                # File is full skeleton, not fragment
                continue
            seg_id = skeleton_file["path"][0:colon_index]
            skeleton_fragment = pickle.loads(skeleton_file["content"])
            if not skeleton_fragment.empty():
                skeletons[seg_id].append(skeleton_fragment)
        for seg_id, skeleton_fragments in skeletons.items():
            skeleton = PrecomputedSkeleton.simple_merge(
                skeleton_fragments).consolidate()
            skeleton = kimimaro.postprocess(skeleton, self.dust_threshold,
                                            self.tick_threshold)
            skeleton.id = int(seg_id)
            self.cf.put(path=seg_id,
                        content=skeleton.to_precomputed(),
                        compress="gzip")
            corgie_logger.info(f"Finished skeleton {seg_id}")
Code Example #3
    def task_generator(self):
        chunks = self.dst_layer.break_bcube_into_chunks(
                bcube=self.bcube,
                chunk_xy=self.chunk_xy,
                chunk_z=1,
                mip=self.mip)

        if 'src_img' in self.src_specs[0]:
            tasks = [MergeRenderImageTask(
                            src_layers=self.src_layers,
                            src_specs=self.src_specs,
                            dst_layer=self.dst_layer,
                            mip=self.mip,
                            pad=self.pad,
                            bcube=input_chunk) for input_chunk in chunks]
        else:
            tasks = [MergeRenderMaskTask(
                            src_layers=self.src_layers,
                            src_specs=self.src_specs,
                            dst_layer=self.dst_layer,
                            mip=self.mip,
                            pad=self.pad,
                            bcube=input_chunk) for input_chunk in chunks]
        corgie_logger.info(
            f"Yielding render tasks for bcube: {self.bcube}, MIP: {self.mip}")

        yield tasks
Code Example #4
def transform_skeletons(
    ctx,
    vector_field_spec,
    src_folder,
    dst_folder,
    field_mip,
    ids,
    ids_filepath,
    task_vertex_size,
    calculate_skeleton_lengths,
    mip0_field,
):
    scheduler = ctx.obj["scheduler"]

    corgie_logger.debug("Setting up layers...")
    vf_stack = create_stack_from_spec(vector_field_spec,
                                      name="src",
                                      readonly=True)

    skeleton_ids = ids
    if ids_filepath is not None:
        skeleton_ids = []
        with open(ids_filepath, "r") as f:
            line = f.readline()
            while line:
                skeleton_ids.append(int(line))
                line = f.readline()
    if len(skeleton_ids) == 0:
        skeleton_ids = None
    else:
        skeleton_ids = list(skeleton_ids)

    skeleton_length_file = None
    if calculate_skeleton_lengths:
        import time

        if not os.path.exists("skeleton_lengths"):
            os.makedirs("skeleton_lengths")
        skeleton_length_file = f"skeleton_lengths/skeleton_lengths_{int(time.time())}"

    vf_layer = vf_stack.get_layers_of_type("field")[0]
    transform_skeletons_job = TransformSkeletonsJob(
        vector_field_layer=vf_layer,
        src_path=src_folder,
        dst_path=dst_folder,
        field_mip=field_mip,
        skeleton_ids=skeleton_ids,
        task_vertex_size=task_vertex_size,
        skeleton_length_file=skeleton_length_file,
        mip0_field=mip0_field,
    )

    scheduler.register_job(
        transform_skeletons_job,
        job_name="Transforming skeletons in {}".format(src_folder),
    )

    scheduler.execute_until_completion()
    result_report = f"Transformed skeletons stored at {dst_folder}. "
    corgie_logger.info(result_report)
Code Example #5
File: create_skeletons.py  Project: seung-lab/corgie
    def execute(self):
        corgie_logger.info(
            f"Skeletonizing {self.seg_layer} at MIP{self.mip}, region: {self.bcube}"
        )
        seg_data = self.seg_layer.read(bcube=self.bcube,
                                       mip=self.mip,
                                       timestamp=self.timestamp)
        resolution = self.seg_layer.cv[self.mip].resolution
        skeletons = kimimaro.skeletonize(
            seg_data,
            self.teasar_params,
            object_ids=self.object_ids,
            anisotropy=resolution,
            dust_threshold=self.dust_threshold,
            progress=False,
            fix_branching=self.fix_branching,
            fix_borders=self.fix_borders,
            fix_avocados=self.fix_avocados,
        ).values()

        minpt = self.bcube.minpt(self.mip)
        for skel in skeletons:
            skel.vertices[:] += minpt * resolution

        cf = CloudFiles(self.dst_path)
        for skel in skeletons:
            path = "{}:{}".format(skel.id, self.bcube.to_filename(self.mip))
            cf.put(
                path=path,
                content=pickle.dumps(skel),
                compress="gzip",
                content_type="application/python-pickle",
                cache_control=False,
            )
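Note the coordinate shift before upload: kimimaro returns vertices in physical units relative to the chunk, so adding the chunk origin (`minpt`, in voxels at `mip`) times the voxel resolution moves them into global nanometer space. A worked sketch of that offset (values hypothetical):

import numpy as np

resolution = np.array([8, 8, 40])       # nm per voxel at this MIP (hypothetical)
minpt = np.array([512, 1024, 100])      # chunk origin in voxels (hypothetical)
vertices = np.array([[3.0, 5.0, 0.0]])  # skeleton vertices, nm within the chunk

vertices[:] += minpt * resolution
# vertices == [[4099., 8197., 4000.]]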
Code Example #6
File: render.py  Project: seung-lab/corgie
    def task_generator(self):
        for mip in self.mips:
            chunks = self.dst_stack.get_layers()[0].break_bcube_into_chunks(
                bcube=self.bcube,
                chunk_xy=self.chunk_xy,
                chunk_z=self.chunk_z,
                mip=mip,
                return_generator=True,
            )

            tasks = (RenderTask(
                self.src_stack,
                self.dst_stack,
                blackout_masks=self.blackout_masks,
                render_masks=self.render_masks,
                mip=mip,
                pad=self.pad,
                bcube=input_chunk,
                additional_fields=self.additional_fields,
                preserve_zeros=self.preserve_zeros,
                seethrough_mask_layer=self.seethrough_mask_layer,
                seethrough_offset=self.seethrough_offset,
            ) for input_chunk in chunks)
            corgie_logger.info(
                f"Yielding render tasks for bcube: {self.bcube}, MIP: {mip}")

            yield tasks
Code Example #7
def downsample(ctx, src_layer_spec, dst_layer_spec, mip_start,
        mip_end, chunk_xy, chunk_z, mips_per_task, start_coord,
        end_coord, coord_mip):
    scheduler = ctx.obj['scheduler']
    corgie_logger.debug("Setting up Source and Destination layers...")

    src_layer = create_layer_from_spec(src_layer_spec,
            caller_name='src layer',
            readonly=True)

    if dst_layer_spec is None:
        corgie_logger.info("Destination layer not specified. Using Source layer "
                "as Destination.")
        dst_layer = src_layer
        dst_layer.readonly = False
    else:
        dst_layer = create_layer_from_spec(dst_layer_spec,
            caller_name='dst_layer layer',
            readonly=False,
            reference=src_layer, chunk_z=chunk_z, overwrite=True)
    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)
    downsample_job = DownsampleJob(src_layer=src_layer,
                                   dst_layer=dst_layer,
                                   mip_start=mip_start,
                                   mip_end=mip_end,
                                   bcube=bcube,
                                   chunk_xy=chunk_xy,
                                   chunk_z=chunk_z,
                                   mips_per_task=mips_per_task)

    # register the job and execute until completion
    scheduler.register_job(downsample_job, job_name="downsample")
    scheduler.execute_until_completion()
    result_report = f"Downsampled {src_layer} from {mip_start} to {mip_end}. Result in {dst_layer}"
    corgie_logger.info(result_report)
Code Example #8
File: fill_nearest.py  Project: seung-lab/corgie
def fill_nearest(
    ctx,
    src_layer_spec,
    dst_folder,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    suffix,
    mip,
    radius,
    force_chunk_z=1,
):
    scheduler = ctx.obj["scheduler"]

    if suffix is None:
        suffix = "_seethrough"
    else:
        suffix = f"_{suffix}"

    crop, pad = 0, 0
    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
                                       name="src",
                                       readonly=True)
    src_stack.folder = dst_folder
    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        overwrite=True,
        force_chunk_z=force_chunk_z,
    )
    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    fill_nearest_job = FillNearestJob(
        src_stack=src_stack,
        dst_stack=dst_stack,
        bcube=bcube,
        radius=radius,
        mip=mip,
        chunk_xy=chunk_xy,
    )
    # register the job and execute until completion
    scheduler.register_job(fill_nearest_job,
                           job_name="Fill Nearest Block {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = (
        f"Rendered layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}")
    corgie_logger.info(result_report)
Code Example #9
File: combine_masks.py  Project: seung-lab/corgie
def combine_masks(
    ctx,
    src_layer_spec,
    dst_layer_spec,
    exp,
    chunk_xy,
    chunk_z,
    force_chunk_xy,
    force_chunk_z,
    start_coord,
    end_coord,
    coord_mip,
    mip,
    pad,
):
    scheduler = ctx.obj["scheduler"]

    if not force_chunk_xy:
        force_chunk_xy = chunk_xy

    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec, name="src", readonly=True)
    reference_layer = src_stack.reference_layer

    dst_layer = create_layer_from_spec(
        dst_layer_spec,
        allowed_types=["mask"],
        default_type="mask",
        readonly=False,
        caller_name="dst_layer",
        reference=reference_layer,
        force_chunk_xy=force_chunk_xy,
        force_chunk_z=force_chunk_z,
        overwrite=True,
    )
    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    combine_masks_job = CombineMasksJob(
        src_stack=src_stack,
        exp=json.loads(exp),
        dst_layer=dst_layer,
        mip=mip,
        bcube=bcube,
        pad=pad,
        chunk_xy=chunk_xy,
        chunk_z=chunk_z,
    )
    # register the job and execute until completion
    scheduler.register_job(combine_masks_job, job_name="Combine Masks {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = f"Results in {str(dst_layer)}"
    corgie_logger.info(result_report)
Code Example #10
def spec_to_layer_dict_readonly(layer_specs):
    """Create a dict of layers from a corgie spec, indexed by unique id.

    The returned layers are read-only.

    Args:
        layer_specs (dict): layer specs indexed by unique id
    """
    layers = {}
    for k, s in layer_specs.items():
        corgie_logger.info(f'Creating layer no. {k}')
        layers[k] = create_layer_from_dict(s, readonly=True)
    return layers
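A usage sketch, assuming a hypothetical two-layer spec (the entry format shown is an assumption, not corgie's exact schema):

layer_specs = {
    "0": {"path": "gs://bucket/img", "type": "img"},
    "1": {"path": "gs://bucket/defect_mask", "type": "mask"},
}
layers = spec_to_layer_dict_readonly(layer_specs)
img_layer = layers["0"]  # a read-only corgie layer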
Code Example #11
    def execute(self):

        corgie_logger.info(
            f"Generate new skeleton vertices task for id {self.skeleton_id_str}"
        )
        skeleton = get_skeleton(self.src_path, self.skeleton_id_str)
        if self.vertex_sort:
            vertex_sort = skeleton.vertices[:, 2].argsort()
        else:
            vertex_sort = np.arange(0, len(skeleton.vertices))
        number_vertices = len(skeleton.vertices)
        index_points = list(range(0, number_vertices, self.task_vertex_size))
        cf = CloudFiles(f"{self.dst_path}")
        array_filenames = []
        for i in range(len(index_points)):
            start_index = index_points[i]
            if i + 1 == len(index_points):
                end_index = number_vertices
            else:
                end_index = index_points[i + 1]
            array_filenames.append(
                f"intermediary_arrays/{self.skeleton_id_str}:{start_index}-{end_index}"
            )
        array_files = cf.get(array_filenames)
        # Dict to make sure arrays are concatenated in correct order
        array_dict = {}
        for array_file in array_files:
            array_dict[array_file["path"]] = pickle.loads(
                array_file["content"])
        array_arrays = []
        for array_filename in array_filenames:
            array_arrays.append(array_dict[array_filename])
        array_arrays = np.concatenate(array_arrays)
        # Restore the correct order of the vertices
        restore_sort = vertex_sort.argsort()
        new_vertices = array_arrays[restore_sort]
        new_skeleton = Skeleton(
            vertices=new_vertices,
            edges=skeleton.edges,
            radii=skeleton.radius,
            vertex_types=skeleton.vertex_types,
            space=skeleton.space,
            transform=skeleton.transform,
        )
        cf.put(
            path=self.skeleton_id_str,
            content=new_skeleton.to_precomputed(),
            compress="gzip",
        )
Code Example #12
    def task_generator(self):
        chunks = self.dst_stack.get_layers()[0].break_bcube_into_chunks(
                bcube=self.bcube,
                chunk_xy=self.chunk_xy,
                chunk_z=1,
                mip=self.mip)

        tasks = [MergeCopyTask(src_stack=self.src_stack,
                          dst_stack=self.dst_stack,
                          mip=self.mip,
                          bcube=input_chunk,
                          z_list=self.z_list) for input_chunk in chunks]
        corgie_logger.info(f"Yielding copy tasks for bcube: {self.bcube}, MIP: {self.mip}")

        yield tasks
Code Example #13
    def execute(self):
        corgie_logger.info(
            f"Normalizing {self.src_layer} at MIP{self.mip}, region: {self.bcube}"
        )
        mean_data = self.mean_layer.read(self.bcube, mip=self.stats_mip)
        var_data = self.var_layer.read(self.bcube, mip=self.stats_mip)

        src_data = self.src_layer.read(self.bcube, mip=self.mip)
        mask_data = helpers.read_mask_list(mask_list=self.mask_layers,
                                           bcube=self.bcube,
                                           mip=self.mip)

        dst_data = (src_data - mean_data) / var_data.sqrt()
        if mask_data is not None:
            dst_data[mask_data] = self.mask_value
        self.dst_layer.write(dst_data, self.bcube, mip=self.mip)
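The arithmetic here is a per-section z-score: subtract the precomputed section mean and divide by the standard deviation (`var.sqrt()`), then overwrite masked pixels with `mask_value`. The same computation in plain NumPy, as a minimal sketch:

import numpy as np

src = np.array([[1.0, 2.0], [3.0, 4.0]])
mean, var = src.mean(), src.var()
mask = np.array([[False, True], [False, False]])

dst = (src - mean) / np.sqrt(var)
dst[mask] = 0.0  # mask_value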
Code Example #14
def filter_skeletons(
            ctx,
            src_folder,
            dst_folder,
            ids,
            bad_sections,
            ids_filepath,
            z_start,
            z_end
        ):
    scheduler = ctx.obj["scheduler"]

    corgie_logger.debug("Setting up layers...")

    skeleton_ids = ids
    if ids_filepath is not None:
        skeleton_ids = []
        with open(ids_filepath, "r") as f:
            line = f.readline()
            while line:
                skeleton_ids.append(int(line))
                line = f.readline()

    if len(skeleton_ids) == 0:
        skeleton_ids = None
    else:
        skeleton_ids = list(skeleton_ids)


    transform_skeletons_job = FilterSkeletonsJob(
        src_path=src_folder,
        dst_path=dst_folder,
        skeleton_ids=skeleton_ids,
        bad_sections=bad_sections,
        z_start=z_start,
        z_end=z_end
    )

    scheduler.register_job(
        transform_skeletons_job,
        job_name="Filtering skeletons in {}".format(src_folder),
    )

    scheduler.execute_until_completion()
    result_report = f"Filtered skeletons stored at {dst_folder}. "
    corgie_logger.info(result_report)
Code Example #15
def downsample_by_spec(ctx, src_layer_spec, spec_path, dst_layer_spec,
                       mip_start, mip_end, chunk_xy, chunk_z, mips_per_task,
                       start_coord, end_coord, coord_mip):
    scheduler = ctx.obj['scheduler']
    corgie_logger.debug("Setting up Source and Destination layers...")

    src_layer = create_layer_from_spec(src_layer_spec,
                                       caller_name='src layer',
                                       readonly=True)

    with open(spec_path, 'r') as f:
        spec = set(json.load(f))

    if dst_layer_spec is None:
        corgie_logger.info(
            "Destination layer not specified. Using Source layer "
            "as Destination.")
        dst_layer = src_layer
        dst_layer.readonly = False
    else:
        dst_layer = create_layer_from_spec(dst_layer_spec,
                                           caller_name='dst_layer layer',
                                           readonly=False,
                                           reference=src_layer,
                                           chunk_z=chunk_z,
                                           overwrite=True)
    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)
    for z in range(*bcube.z_range()):
        if z in spec:
            job_bcube = bcube.reset_coords(zs=z, ze=z + 1, in_place=False)
            downsample_job = DownsampleJob(src_layer=src_layer,
                                           dst_layer=dst_layer,
                                           mip_start=mip_start,
                                           mip_end=mip_end,
                                           bcube=job_bcube,
                                           chunk_xy=chunk_xy,
                                           chunk_z=chunk_z,
                                           mips_per_task=mips_per_task)

            # register the job with the scheduler
            scheduler.register_job(downsample_job,
                                   job_name=f"Downsample {job_bcube}")
    scheduler.execute_until_completion()
    result_report = f"Downsampled {src_layer} from {mip_start} to {mip_end}. Result in {dst_layer}"
    corgie_logger.info(result_report)
Code Example #16
File: create_skeletons.py  Project: seung-lab/corgie
    def task_generator(self):
        chunks = self.seg_layer.break_bcube_into_chunks(
            bcube=self.bcube,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            mip=self.mip,
            readonly=True,
        )
        tasks = [
            SkeletonTask(
                self.seg_layer,
                self.dst_path,
                timestamp=self.timestamp,
                mip=self.mip,
                teasar_params=self.teasar_params,
                object_ids=self.object_ids,
                dust_threshold=self.dust_threshold,
                fix_branching=self.fix_branching,
                fix_borders=self.fix_borders,
                fix_avocados=self.fix_avocados,
                bcube=input_chunk,
            ) for input_chunk in chunks
        ]
        corgie_logger.info(
            f"Yielding skeletonization tasks for bcube: {self.bcube}, MIP: {self.mip}"
        )
        yield tasks
        yield scheduling.wait_until_done
        if self.single_merge_mode:
            merge_tasks = [
                MergeSkeletonTask(
                    self.dst_path,
                    self.mip,
                    self.dust_threshold,
                    self.tick_threshold,
                    str(object_id),
                ) for object_id in self.object_ids
            ]
            yield merge_tasks
        else:
            yield [
                MergeSkeletonTask(self.dst_path, self.mip, self.dust_threshold,
                                  self.tick_threshold)
            ]
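`task_generator` is a generator-based protocol: each `yield` hands the scheduler a batch of tasks, and `yield scheduling.wait_until_done` is a barrier, so the merge tasks only start once every skeletonization task has finished. A minimal sketch of how a scheduler might drive this protocol (simplified; not corgie's actual scheduler):

WAIT_UNTIL_DONE = object()  # stand-in for scheduling.wait_until_done

def drive(task_generator, execute_batch, wait_for_batches):
    for item in task_generator:
        if item is WAIT_UNTIL_DONE:
            wait_for_batches()         # block until in-flight tasks finish
        else:
            execute_batch(list(item))  # enqueue the yielded batch of tasks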
Code Example #17
File: copy.py  Project: scottwedge/corgie
    def task_generator(self):
        chunks = self.dst_stack.get_layers()[0].break_bcube_into_chunks(
            bcube=self.bcube,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            mip=self.mip)

        tasks = [
            CopyTask(self.src_stack,
                     self.dst_stack,
                     blackout_masks=self.blackout_masks,
                     copy_masks=self.copy_masks,
                     mip=self.mip,
                     bcube=input_chunk) for input_chunk in chunks
        ]
        corgie_logger.info(
            f"Yielding copy tasks for bcube: {self.bcube}, MIP: {self.mip}")

        yield tasks
Code Example #18
File: copy.py  Project: seung-lab/corgie
    def task_generator(self):
        chunks = self.dst_layer.break_bcube_into_chunks(
            bcube=self.bcube,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            mip=self.mip,
            return_generator=True,
        )

        tasks = (CopyLayerTask(
            self.src_layer,
            self.dst_layer,
            mip=self.mip,
            bcube=input_chunk,
        ) for input_chunk in chunks)
        corgie_logger.info(
            f"Yielding copy layer tasks for bcube: {self.bcube}, MIP: {self.mip}"
        )

        yield tasks
Code Example #19
File: fill_nearest.py  Project: seung-lab/corgie
    def task_generator(self):
        chunks = self.dst_stack.get_layers()[0].break_bcube_into_chunks(
            bcube=self.bcube,
            chunk_xy=self.chunk_xy,
            chunk_z=1,
            mip=self.mip,
            return_generator=True,
        )

        tasks = (FillNearestTask(
            self.src_stack,
            self.dst_stack,
            mip=self.mip,
            bcube=chunk,
            radius=self.radius,
        ) for chunk in chunks)
        corgie_logger.info(
            f"Yielding fill nearest tasks for bcube: {self.bcube}, MIP: {self.mip}"
        )

        yield tasks
Code Example #20
File: render.py  Project: scottwedge/corgie
    def task_generator(self):
        chunks = self.dst_stack.get_layers()[0].break_bcube_into_chunks(
            bcube=self.bcube,
            chunk_xy=self.chunk_xy,
            chunk_z=self.chunk_z,
            mip=self.mip)

        tasks = [
            RenderTask(self.src_stack,
                       self.dst_stack,
                       blackout_masks=self.blackout_masks,
                       render_masks=self.render_masks,
                       mip=self.mip,
                       pad=self.pad,
                       bcube=input_chunk,
                       additional_fields=self.additional_fields)
            for input_chunk in chunks
        ]
        corgie_logger.info(
            f"Yielding render tasks for bcube: {self.bcube}, MIP: {self.mip}")

        yield tasks
Code Example #21
File: merge_render.py  Project: seung-lab/corgie
    def execute(self):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        padded_bcube = self.bcube.uncrop(self.pad, self.mip)
        for k, specs in enumerate(self.src_specs[::-1]):
            src_z = specs["src_z"]
            dst_z = self.bcube.z_range()[0]

            corgie_logger.info(f"Load fields for {padded_bcube}")
            # backwards compatible
            if not isinstance(specs["src_field"], list):
                specs["src_field"] = [specs["src_field"]]
            mask_layer = self.src_layers[str(specs["src_mask"])]

            field_ids = list(map(str, specs["src_field"]))
            corgie_logger.info(f"field ids={field_ids}")
            z_list = specs.get("src_field_z", [src_z] * len(field_ids))
            fields = FieldSet([self.src_layers[n] for n in field_ids])
            field = fields.read(
                bcube=padded_bcube, z_list=z_list, mip=self.mip, device=device
            )
            bcube = padded_bcube.reset_coords(zs=src_z, ze=src_z + 1, in_place=False)

            mask_trans = helpers.percentile_trans_adjuster(field)
            mask_trans = mask_trans.round_to_mip(self.mip, mask_layer.data_mip)
            corgie_logger.debug(f"mask_trans: {mask_trans}")

            mask_bcube = bcube.translate(
                x_offset=mask_trans.y, y_offset=mask_trans.x, mip=self.mip
            )

            corgie_logger.info(f"Load masks for {mask_bcube}")
            mask_id = specs["mask_id"]
            mask_layer.binarizer = helpers.Binarizer(["eq", mask_id])
            mask = mask_layer.read(bcube=mask_bcube, mip=self.mip, device=device)
            mask = residuals.res_warp_img(
                mask.float(), field - mask_trans.to_tensor(device=field.device)
            ).tensor()
            mask = (mask > 0.4).bool()
            cropped_mask = helpers.crop(mask, self.pad)

            relabel_id = torch.as_tensor(specs.get("relabel_id", k + 1), dtype=torch.uint8)
            if k == 0:
                dst_img = cropped_mask * relabel_id
                dst_img[~cropped_mask] = 0
            else:
                dst_img[cropped_mask] = cropped_mask[cropped_mask] * relabel_id

        self.dst_layer.write(dst_img.cpu(), bcube=self.bcube, mip=self.mip)
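The loop composites the warped masks into a single label image. Because `src_specs` is iterated in reverse, the first spec in the list is processed last, so its `relabel_id` wins wherever masks overlap. A toy sketch of that compositing (labels hypothetical):

import torch

mask_b = torch.tensor([[True, True], [False, False]])  # processed first -> label 1
mask_a = torch.tensor([[False, True], [True, False]])  # processed last  -> label 2

dst = mask_b * torch.tensor(1, dtype=torch.uint8)
dst[mask_a] = mask_a[mask_a] * torch.tensor(2, dtype=torch.uint8)
# dst == [[1, 2], [2, 0]]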
Code Example #22
File: copy.py  Project: seung-lab/corgie
def copy(
    ctx,
    src_layer_spec,
    dst_folder,
    copy_masks,
    blackout_masks,
    chunk_xy,
    chunk_z,
    start_coord,
    end_coord,
    coord_mip,
    mip,
    suffix,
    force_chunk_xy,
    force_chunk_z,
):

    scheduler = ctx.obj["scheduler"]
    if suffix is None:
        suffix = ""
    else:
        suffix = f"_{suffix}"

    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
                                       name="src",
                                       readonly=True)

    if not force_chunk_xy:
        force_chunk_xy = chunk_xy

    if not force_chunk_z:
        force_chunk_z = chunk_z

    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        force_chunk_z=force_chunk_z,
        overwrite=True,
    )

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    copy_job = CopyJob(
        src_stack=src_stack,
        dst_stack=dst_stack,
        mip=mip,
        bcube=bcube,
        chunk_xy=chunk_xy,
        chunk_z=chunk_z,
        copy_masks=copy_masks,
        blackout_masks=blackout_masks,
    )
    # register the job and execute until completion
    scheduler.register_job(copy_job, job_name="Copy {}".format(bcube))
    scheduler.execute_until_completion()

    result_report = (
        f"Copied layers {[str(l) for l in src_stack.get_layers_of_type('img')]} "
        f"to {[str(l) for l in dst_stack.get_layers_of_type('img')]}")
    corgie_logger.info(result_report)
Code Example #23
    def execute(self):
        corgie_logger.info(
            f"Starting transform skeleton vertices task for id {self.skeleton_id_str}"
        )

        skeleton = get_skeleton(self.src_path, self.skeleton_id_str)

        if self.vertex_sort:
            vertex_sort = skeleton.vertices[:, 2].argsort()
        else:
            vertex_sort = np.arange(0, len(skeleton.vertices))

        # How many vertices we will use at once to get a bcube to download from the vector field
        vertex_process_size = 50
        vertices_to_transform = skeleton.vertices[
            vertex_sort[self.start_vertex_index:self.end_vertex_index]]
        index_vertices = list(
            range(0, self.number_vertices, vertex_process_size))
        new_vertices = []
        for i in range(len(index_vertices)):
            if i + 1 == len(index_vertices):
                current_batch_vertices = vertices_to_transform[
                    index_vertices[i]:]
            else:
                current_batch_vertices = vertices_to_transform[
                    index_vertices[i]:index_vertices[i + 1]]
            field_resolution = np.array(
                self.vector_field_layer.resolution(self.field_mip))
            bcube = get_bcube_from_vertices(
                vertices=current_batch_vertices,
                resolution=field_resolution,
                mip=self.field_mip,
            )
            field_data = self.vector_field_layer.read(
                bcube=bcube, mip=self.field_mip).permute(2, 3, 0, 1)
            current_batch_vertices_to_mip = current_batch_vertices / field_resolution
            bcube_minpt = bcube.minpt(self.field_mip)
            # np.int was removed in NumPy 1.24; use the builtin int instead.
            field_indices = current_batch_vertices_to_mip.astype(int) - bcube_minpt
            vector_resolution = (
                self.vector_field_layer.resolution(0) * np.array([
                    2**(self.field_mip - self.vector_field_layer.data_mip),
                    2**(self.field_mip - self.vector_field_layer.data_mip),
                    1,
                ]) if self.mip0_field else self.vector_field_layer.resolution(
                    self.field_mip))
            vectors_to_add = []
            corgie_logger.info(f"{field_data.shape}, {field_indices.max(0)}")
            # Debugging guard: drop into pdb if any vertex indexes outside the
            # downloaded field chunk. Use `dim`, not `i`, to avoid shadowing
            # the outer batch index.
            for dim in range(len(field_data.shape) - 1):
                if field_indices.max(0)[dim] >= field_data.shape[dim]:
                    import pdb
                    pdb.set_trace()
            for cur_field_index in field_indices:
                vector_at_point = field_data[cur_field_index[0],
                                             cur_field_index[1],
                                             cur_field_index[2]]
                # Each vector is stored in [Y,X] format
                vectors_to_add.append([
                    int(vector_resolution[0] * vector_at_point[1].item()),
                    int(vector_resolution[1] * vector_at_point[0].item()),
                    0,
                ])
            vectors_to_add = np.array(vectors_to_add)
            current_batch_warped_vertices = current_batch_vertices + vectors_to_add
            new_vertices.append(current_batch_warped_vertices)

        new_vertices = np.concatenate(new_vertices)
        cf = CloudFiles(f"{self.dst_path}/intermediary_arrays/")
        cf.put(
            path=f"{self.skeleton_id_str}:{self.start_vertex_index}-{self.end_vertex_index}",
            content=pickle.dumps(new_vertices),
        )
Code Example #24
def normalize_by_spec(ctx, src_layer_spec, spec_path, dst_folder, stats_mip,
                      mip_start, mip_end, chunk_xy, chunk_z, start_coord,
                      end_coord, coord_mip, suffix, recompute_stats,
                      mask_value):
    if chunk_z != 1:
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError(
            "Compute Statistics command currently only "
            "supports per-section statistics.")
    result_report = ""
    scheduler = ctx.obj['scheduler']

    if suffix is None:
        suffix = '_norm'
    else:
        suffix = f"_{suffix}"

    if stats_mip is None:
        stats_mip = mip_end

    with open(spec_path, 'r') as f:
        spec = set(json.load(f))

    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
                                       name='src',
                                       readonly=True)

    # dst_stack = stack.create_stack_from_reference(reference_stack=src_stack,
    #         folder=dst_folder, name="dst", types=["img"], readonly=False,
    #         suffix=suffix, overwrite=True)

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    img_layers = src_stack.get_layers_of_type("img")
    mask_layers = src_stack.get_layers_of_type("mask")
    field_layers = src_stack.get_layers_of_type("field")
    assert len(field_layers) == 0

    for l in img_layers:
        mean_layer = l.get_sublayer(
            name=f"mean{suffix}",
            path=os.path.join(dst_folder, f"mean{suffix}"),
            layer_type="section_value",
        )

        var_layer = l.get_sublayer(name=f"var{suffix}",
                                   path=os.path.join(dst_folder,
                                                     f"var{suffix}"),
                                   layer_type="section_value")

        if recompute_stats:
            for z in range(*bcube.z_range()):
                if z in spec:
                    job_bcube = bcube.reset_coords(zs=z,
                                                   ze=z + 1,
                                                   in_place=False)
                    compute_stats_job = ComputeStatsJob(
                        src_layer=l,
                        mask_layers=mask_layers,
                        mean_layer=mean_layer,
                        var_layer=var_layer,
                        bcube=job_bcube,
                        mip=stats_mip,
                        chunk_xy=chunk_xy,
                        chunk_z=chunk_z)

                    # register the job with the scheduler
                    scheduler.register_job(
                        compute_stats_job,
                        job_name=f"Compute Stats. Layer: {l}, Bcube: {job_bcube}")
            scheduler.execute_until_completion()

        dst_layer = l.get_sublayer(name=f"{l.name}{suffix}",
                                   path=os.path.join(dst_folder, "img",
                                                     f"{l.name}{suffix}"),
                                   layer_type=l.get_layer_type(),
                                   dtype='float32',
                                   overwrite=True)

        for z in range(*bcube.z_range()):
            if z in spec:
                job_bcube = bcube.reset_coords(zs=z, ze=z + 1, in_place=False)
                result_report += f"Normalized {l} -> {dst_layer}\n"
                for mip in range(mip_start, mip_end + 1):
                    normalize_job = NormalizeJob(src_layer=l,
                                                 mask_layers=mask_layers,
                                                 dst_layer=deepcopy(dst_layer),
                                                 mean_layer=mean_layer,
                                                 var_layer=var_layer,
                                                 stats_mip=stats_mip,
                                                 mip=mip,
                                                 bcube=job_bcube,
                                                 chunk_xy=chunk_xy,
                                                 chunk_z=chunk_z,
                                                 mask_value=mask_value)

                    # register the job with the scheduler
                    scheduler.register_job(
                        normalize_job,
                        job_name=f"Normalize {job_bcube}, MIP {mip}")
    scheduler.execute_until_completion()
    corgie_logger.info(result_report)
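`spec_path` here (and in `downsample_by_spec`, Code Example #15) is loaded with `set(json.load(f))`, so the file is assumed to be a JSON array of z indices to process. A minimal sketch of producing such a spec (section numbers hypothetical):

import json

with open("sections.json", "w") as f:
    json.dump([1001, 1002, 1005], f)  # z sections to process

with open("sections.json") as f:
    spec = set(json.load(f))  # {1001, 1002, 1005}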
Code Example #25
    def task_generator(self):
        skeletons = self.get_skeletons(self.src_path)
        transform_vertex_tasks = []
        generate_new_skeleton_tasks = []
        for skeleton_id_str, skeleton in skeletons.items():
            number_vertices = len(skeleton.vertices)
            # Vector field chunks are typically chunked by 1 in z, so we process
            # the skeleton's vertices in z-order for maximum download efficiency.
            index_points = list(
                range(0, number_vertices, self.task_vertex_size))
            for i in range(len(index_points)):
                start_vertex_index = index_points[i]
                if i + 1 == len(index_points):
                    end_vertex_index = number_vertices
                else:
                    end_vertex_index = index_points[i + 1]
                transform_vertex_tasks.append(
                    TransformSkeletonVerticesTask(
                        vector_field_layer=self.vector_field_layer,
                        skeleton_id_str=skeleton_id_str,
                        src_path=self.src_path,
                        dst_path=self.dst_path,
                        field_mip=self.field_mip,
                        start_vertex_index=start_vertex_index,
                        end_vertex_index=end_vertex_index,
                        vertex_sort=True,
                        mip0_field=self.mip0_field,
                    ))
            generate_new_skeleton_tasks.append(
                GenerateNewSkeletonTask(
                    skeleton_id_str=skeleton_id_str,
                    src_path=self.src_path,
                    dst_path=self.dst_path,
                    task_vertex_size=self.task_vertex_size,
                    vertex_sort=True,
                ))
        corgie_logger.info(
            f"Yielding transform skeleton vertex tasks for skeletons in {self.src_path}"
        )
        yield transform_vertex_tasks
        yield scheduling.wait_until_done
        corgie_logger.info(f"Generating skeletons to {self.dst_path}")
        yield generate_new_skeleton_tasks
        yield scheduling.wait_until_done
        # TODO: Delete intermediary vertex files

        if self.skeleton_length_file is not None:
            new_skeletons = self.get_skeletons(self.dst_path)
            corgie_logger.info(
                f"Calculating skeleton lengths to {self.skeleton_length_file}")
            with open(self.skeleton_length_file, "w") as f:
                f.write(
                    "Skeleton id, Original Skeleton Length (nm), New Skeleton Length (nm)\n"
                )
                for skeleton_id_str in skeletons:
                    original_skeleton = skeletons[skeleton_id_str]
                    new_skeleton = new_skeletons[skeleton_id_str]
                    f.write(
                        f"{skeleton_id_str},{int(original_skeleton.cable_length())},{int(new_skeleton.cable_length())}\n"
                    )
Code Example #26
File: align.py  Project: seung-lab/corgie
def align(
    ctx,
    src_layer_spec,
    dst_folder,
    render_pad,
    render_chunk_xy,
    processor_spec,
    pad,
    crop,
    processor_mip,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    bad_starter_path,
    block_size,
    stitch_size,
    vote_dist,
    consensus_threshold,
    blur_sigma,
    kernel_size,
    blend_xy,
    force_chunk_xy,
    suffix,
    seethrough_spec,
    seethrough_limit,
    seethrough_spec_mip,
    decay_dist,
    blur_rate,
    restart_stage,
    restart_suffix,
):

    scheduler = ctx.obj["scheduler"]

    if suffix is None:
        suffix = "_aligned"
    else:
        suffix = f"_{suffix}"
    if (restart_suffix is None) or (restart_stage == 0):
        restart_suffix = suffix

    if crop is None:
        crop = pad

    corgie_logger.debug("Setting up layers...")
    # TODO: store stitching images in layer other than even & odd
    if vote_dist + stitch_size - 2 >= block_size:
        raise exceptions.CorgieException(
            "block_size too small for stitching + voting requirements (stitch_size + vote_dist)"
        )

    src_stack = create_stack_from_spec(src_layer_spec,
                                       name="src",
                                       readonly=True)
    src_stack.folder = dst_folder

    if force_chunk_xy is None:
        force_chunk_xy = chunk_xy

    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=restart_suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    even_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=os.path.join(dst_folder, "even"),
        name="even",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    odd_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=os.path.join(dst_folder, "odd"),
        name="odd",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    corgie_logger.debug("Done!")

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    corgie_logger.debug("Calculating blocks...")
    skip_list = []
    if bad_starter_path is not None:
        with open(bad_starter_path) as f:
            line = f.readline()
            while line:
                skip_list.append(int(line))
                line = f.readline()
    blocks = get_blocks(
        start=bcube.z_range()[0],
        stop=bcube.z_range()[1],
        block_size=block_size,
        block_overlap=1,
        skip_list=skip_list,
        src_stack=src_stack,
        even_stack=even_stack,
        odd_stack=odd_stack,
    )
    stitch_blocks = [b.overlap(stitch_size) for b in blocks[1:]]
    corgie_logger.debug("All Blocks")
    for block, stitch_block in zip(blocks, [None] + stitch_blocks):
        corgie_logger.debug(block)
        corgie_logger.debug(f"Stitch {stitch_block}")
        corgie_logger.debug("\n")

    max_blur_mip = (math.ceil(math.log(decay_dist * blur_rate + 1, 2)) +
                    processor_mip[-1])
    corgie_logger.debug(f"Max blur mip for stitching field: {max_blur_mip}")

    # Set all field names, adjusting for restart suffix
    block_field_name = f"field{suffix}"
    stitch_estimated_suffix = f"_stitch_estimated{suffix}"
    stitch_estimated_name = f"field{stitch_estimated_suffix}"
    stitch_corrected_name = f"stitch_corrected{suffix}"
    stitch_corrected_field = None
    composed_name = f"composed{suffix}"
    if restart_stage <= 2:
        stitch_estimated_suffix = f"_stitch_estimated{restart_suffix}"
        stitch_estimated_name = f"field{stitch_estimated_suffix}"
        stitch_corrected_name = f"stitch_corrected{restart_suffix}"
    if restart_stage <= 3:
        composed_name = f"composed{restart_suffix}"

    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=render_pad,
        chunk_xy=render_chunk_xy,
        chunk_z=1,
        render_masks=False,
    )

    cf_method = helpers.PartialSpecification(
        f=ComputeFieldJob,
        pad=pad,
        crop=crop,
        processor_mip=processor_mip,
        processor_spec=processor_spec,
        chunk_xy=chunk_xy,
        blend_xy=blend_xy,
        chunk_z=1,
    )
    if seethrough_spec != tuple():
        assert seethrough_spec_mip is not None
        seethrough_method = helpers.PartialSpecification(
            f=SeethroughCompareJob,
            mip=seethrough_spec_mip,
            processor_spec=seethrough_spec,
            chunk_xy=chunk_xy,
            pad=pad,
            crop=pad,
            seethrough_limit=seethrough_limit,
        )
    else:
        seethrough_method = None

    if restart_stage == 0:
        corgie_logger.debug("Aligning blocks...")
        for block in blocks:
            block_bcube = block.get_bcube(bcube)
            # Use copies of src & dst so that aligning the stitching blocks
            # is not affected by these block fields.
            # Copying also allows local compute to not modify objects for other tasks
            align_block_job_forv = AlignBlockJob(
                src_stack=deepcopy(block.src_stack),
                dst_stack=deepcopy(block.dst_stack),
                bcube=block_bcube,
                render_method=render_method,
                cf_method=cf_method,
                vote_dist=vote_dist,
                seethrough_method=seethrough_method,
                suffix=suffix,
                copy_start=True,
                use_starters=True,
                backward=False,
                consensus_threshold=consensus_threshold,
                blur_sigma=blur_sigma,
                kernel_size=kernel_size,
            )
            scheduler.register_job(
                align_block_job_forv,
                job_name=f"Forward Align {block} {block_bcube}",
            )

        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    if restart_stage <= 1:
        corgie_logger.debug("Aligning stitching blocks...")
        for stitch_block in stitch_blocks:
            block_bcube = stitch_block.get_bcube(bcube)
            # These blocks will have block-aligned images, but not
            # the block_fields that warped them.
            align_block_job_forv = AlignBlockJob(
                src_stack=deepcopy(stitch_block.src_stack),
                dst_stack=deepcopy(stitch_block.dst_stack),
                bcube=block_bcube,
                render_method=render_method,
                cf_method=cf_method,
                vote_dist=vote_dist,
                seethrough_method=seethrough_method,
                suffix=stitch_estimated_suffix,
                copy_start=False,
                use_starters=False,
                backward=False,
                consensus_threshold=consensus_threshold,
                blur_sigma=blur_sigma,
                kernel_size=kernel_size,
            )
            scheduler.register_job(
                align_block_job_forv,
                job_name=f"Stitch Align {stitch_block} {block_bcube}",
            )

        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    # Add in the stitch_estimated fields that were just created above
    even_stack.create_sublayer(
        stitch_estimated_name,
        layer_type="field",
        overwrite=False,
    )
    odd_stack.create_sublayer(
        stitch_estimated_name,
        layer_type="field",
        overwrite=False,
    )
    if restart_stage <= 2:
        if stitch_size > 1:
            corgie_logger.debug("Voting over stitching blocks")
            stitch_corrected_field = dst_stack.create_sublayer(
                stitch_corrected_name, layer_type="field", overwrite=True)
            for stitch_block in stitch_blocks:
                stitch_estimated_field = stitch_block.dst_stack[
                    stitch_estimated_name]
                block_bcube = bcube.reset_coords(
                    zs=stitch_block.start,
                    ze=stitch_block.start + 1,
                    in_place=False,
                )
                z_offsets = [
                    z - block_bcube.z_range()[0]
                    for z in range(stitch_block.start, stitch_block.stop)
                ]
                vote_stitch_job = VoteJob(
                    input_fields=[stitch_estimated_field],
                    output_field=stitch_corrected_field,
                    chunk_xy=chunk_xy,
                    bcube=block_bcube,
                    z_offsets=z_offsets,
                    mip=processor_mip[-1],
                    consensus_threshold=consensus_threshold,
                    blur_sigma=blur_sigma,
                    kernel_size=kernel_size,
                )
                scheduler.register_job(
                    vote_stitch_job,
                    job_name=f"Stitching Vote {stitch_block} {block_bcube}",
                )

            scheduler.execute_until_completion()
            corgie_logger.debug("Done!")

        for stitch_block in stitch_blocks:
            block_bcube = bcube.reset_coords(zs=stitch_block.start,
                                             ze=stitch_block.start + 1,
                                             in_place=False)
            field_to_downsample = stitch_block.dst_stack[stitch_estimated_name]
            if stitch_corrected_field is not None:
                field_to_downsample = stitch_corrected_field
            # Hack for fafb
            field_info = field_to_downsample.get_info()
            for scale in field_info['scales']:
                scale['chunk_sizes'][-1][-1] = 1
                scale['encoding'] = 'raw'
            field_to_downsample.cv.store_info(field_info)
            field_to_downsample.cv.fetch_info()
            downsample_field_job = DownsampleJob(
                src_layer=field_to_downsample,
                mip_start=processor_mip[-1],
                mip_end=max_blur_mip,
                bcube=block_bcube,
                # TODO: This probably needs to be modified at highest MIPs
                chunk_xy=chunk_xy,
                chunk_z=1,
                mips_per_task=2,
            )
            scheduler.register_job(
                downsample_field_job,
                job_name=f"Downsample stitching field {block_bcube}",
            )
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    # Add in the block-align fields
    even_stack.create_sublayer(
        block_field_name,
        layer_type="field",
        overwrite=False,
    )
    odd_stack.create_sublayer(
        block_field_name,
        layer_type="field",
        overwrite=False,
    )
    composed_field = dst_stack.create_sublayer(composed_name,
                                               layer_type="field",
                                               overwrite=True)
    if (restart_stage > 2) and (stitch_size > 1):
        stitch_corrected_field = dst_stack.create_sublayer(
            stitch_corrected_name, layer_type="field", overwrite=False)
    if restart_stage <= 3:
        corgie_logger.debug("Stitching blocks...")
        for block, stitch_block in zip(blocks[1:], stitch_blocks):
            block_bcube = block.broadcastable().get_bcube(bcube)
            block_list = block.get_neighbors(dist=decay_dist)
            corgie_logger.debug(f"src_block: {block}")
            corgie_logger.debug(f"influencing blocks: {block_list}")
            z_list = [b.stop for b in block_list]
            # stitch_corrected_field used if there is multi-section block overlap,
            # which requires voting to produce a corrected field.
            # If there is only single-section block overlap, then use
            # stitch_estimated_fields from each stitch_block
            if stitch_corrected_field is not None:
                stitching_fields = [stitch_corrected_field]
            else:
                # Order with furthest block first (convention of FieldSet).
                stitching_fields = [
                    stitch_block.dst_stack[stitch_estimated_name],
                    stitch_block.src_stack[stitch_estimated_name],
                ]

            broadcast_job = BroadcastJob(
                block_field=block.dst_stack[block_field_name],
                stitching_fields=stitching_fields,
                output_field=composed_field,
                chunk_xy=chunk_xy,
                bcube=block_bcube,
                pad=pad,
                z_list=z_list,
                mip=processor_mip[-1],
                decay_dist=decay_dist,
                blur_rate=blur_rate,
            )
            scheduler.register_job(broadcast_job,
                                   job_name=f"Broadcast {block} {block_bcube}")

        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

        if len(blocks) > 1:
            block_bcube = blocks[0].get_bcube(bcube)
            copy_job = CopyLayerJob(
                src_layer=even_stack[block_field_name],
                dst_layer=composed_field,
                mip=processor_mip[-1],
                bcube=block_bcube,
                chunk_xy=chunk_xy,
                chunk_z=1,
            )
            scheduler.register_job(
                copy_job,
                job_name=f"Copy first block_field to composed_field location")
            scheduler.execute_until_completion()
            corgie_logger.debug("Done!")

    if restart_stage <= 4:
        if len(blocks) == 1:
            block_bcube = blocks[0].get_bcube(bcube)
            render_job = RenderJob(
                src_stack=src_stack,
                dst_stack=dst_stack,
                mips=processor_mip[-1],
                pad=pad,
                bcube=block_bcube,
                chunk_xy=chunk_xy,
                chunk_z=1,
                render_masks=True,
                blackout_masks=False,
                additional_fields=[even_stack[block_field_name]],
            )
            scheduler.register_job(
                render_job, job_name=f"Render first block {block_bcube}")
        else:
            block_bcube = bcube.reset_coords(zs=blocks[0].start,
                                             ze=blocks[-1].stop,
                                             in_place=False)
            render_job = RenderJob(
                src_stack=src_stack,
                dst_stack=dst_stack,
                mips=processor_mip[-1],
                pad=pad,
                bcube=block_bcube,
                chunk_xy=chunk_xy,
                chunk_z=1,
                render_masks=True,
                blackout_masks=True,
                additional_fields=[composed_field],
            )
            scheduler.register_job(render_job,
                                   job_name=f"Render all blocks {block_bcube}")
        scheduler.execute_until_completion()
        corgie_logger.debug("Done!")

    result_report = (
        f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}")
    corgie_logger.info(result_report)
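The block decomposition above splits the z-range into fixed-size blocks with a 1-section overlap (`block_overlap=1`), alternating destination stacks between `even` and `odd`; each overlap region becomes a stitching block. A toy sketch of such an overlapping partition (not corgie's `get_blocks`):

def toy_blocks(start, stop, block_size, overlap=1):
    # Each new block starts `overlap` sections before the previous block
    # ends, so consecutive blocks share sections for stitching.
    blocks = []
    z = start
    while z < stop:
        blocks.append((z, min(z + block_size, stop)))
        z += block_size - overlap
    return blocks

# toy_blocks(0, 10, 4) == [(0, 4), (3, 7), (6, 10), (9, 10)]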
Code Example #27
def seethrough_block(
    ctx,
    src_layer_spec,
    dst_folder,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    suffix,
    seethrough_spec,
    seethrough_limit,
    seethrough_spec_mip,
    force_chunk_z=1,
):
    scheduler = ctx.obj["scheduler"]

    if suffix is None:
        suffix = "_seethrough"
    else:
        suffix = f"_{suffix}"

    crop, pad = 0, 0
    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
                                       name="src",
                                       readonly=True)
    src_stack.folder = dst_folder
    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        overwrite=True,
        force_chunk_z=force_chunk_z,
    )
    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=pad,
        chunk_xy=chunk_xy,
        chunk_z=1,
        render_masks=False,
    )
    seethrough_method = helpers.PartialSpecification(
        f=SeethroughCompareJob,
        mip=seethrough_spec_mip,
        processor_spec=seethrough_spec,
        chunk_xy=chunk_xy,
        pad=pad,
        crop=pad,
        seethrough_limit=seethrough_limit,
    )
    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    seethrough_block_job = SeethroughBlockJob(
        src_stack=src_stack,
        dst_stack=dst_stack,
        bcube=bcube,
        render_method=render_method,
        seethrough_method=seethrough_method,
        suffix=suffix,
    )
    # register the job and run it to completion
    scheduler.register_job(seethrough_block_job,
                           job_name="Seethrough Block {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = (
        f"Rendered layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}")
    corgie_logger.info(result_report)
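
`helpers.PartialSpecification` bundles a job class's tuning parameters up front so the block job can supply the data-dependent arguments (stacks, bcube) when it actually constructs each job. Assuming nothing about corgie's internals, the pattern is close to `functools.partial`; `FakeJob` below is a stand-in, not a corgie class:

from functools import partial

class FakeJob:
    # Stand-in for a corgie job class; illustrative only.
    def __init__(self, pad, chunk_xy, chunk_z, bcube=None):
        self.pad, self.chunk_xy, self.chunk_z, self.bcube = pad, chunk_xy, chunk_z, bcube

# Fix the tuning parameters now...
render_method = partial(FakeJob, pad=0, chunk_xy=1024, chunk_z=1)

# ...and let the block job fill in the data-dependent pieces later.
job = render_method(bcube="x: 0-1024, y: 0-1024, z: 100-101")
print(job.chunk_xy, job.bcube)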
Code example #28
File: merge_render.py Project: seung-lab/corgie
    def execute(self):
        device = "cuda" if torch.cuda.is_available() else "cpu"

        # Field padding
        padded_bcube = self.bcube.uncrop(self.pad, self.mip)
        for k, specs in enumerate(self.src_specs[::-1]):
            src_z = specs["src_z"]
            dst_z = self.bcube.z_range()[0]

            corgie_logger.info(f"Load fields for {padded_bcube}")
            # backwards compatible
            if not isinstance(specs["src_field"], list):
                specs["src_field"] = [specs["src_field"]]
            mask_layer = self.src_layers[str(specs["src_mask"])]

            field_ids = list(map(str, specs["src_field"]))
            corgie_logger.info(f"field ids={field_ids}")
            z_list = specs.get("src_field_z", [src_z] * len(field_ids))
            fields = FieldSet([self.src_layers[n] for n in field_ids])
            field = fields.read(
                bcube=padded_bcube, z_list=z_list, mip=self.mip, device=device
            )
            bcube = padded_bcube.reset_coords(zs=src_z, ze=src_z + 1, in_place=False)

            # Extend image/mask cutout to account for field spread
            render_pad = int(
                (field.max_vector() - field.min_vector()).max().ceil().tensor().item()
            )
            # Round the pad up to a multiple of the mask layer's coarser pixel grid
            snap_factor = 2 ** (max(self.mip, mask_layer.data_mip) - self.mip)
            render_pad = math.ceil(render_pad / snap_factor) * snap_factor
            render_pad = min(render_pad, 4096)  # Safety cap on the extra padding

            render_bcube = bcube.uncrop(render_pad, self.mip)
            corgie_logger.debug(f"render_pad: {render_pad}")

            # Move image/mask cutout to account for field drift
            img_trans = helpers.percentile_trans_adjuster(field)
            mask_trans = img_trans.round_to_mip(self.mip, mask_layer.data_mip)
            corgie_logger.debug(f"img_trans: {img_trans} | mask_trans: {mask_trans}")

            img_bcube = render_bcube.translate(
                x_offset=img_trans.y, y_offset=img_trans.x, mip=self.mip
            )
            mask_bcube = render_bcube.translate(
                x_offset=mask_trans.y, y_offset=mask_trans.x, mip=self.mip
            )

            if render_pad > 0:
                field = torch.nn.functional.pad(
                    field,
                    [render_pad, render_pad, render_pad, render_pad],
                    mode='replicate',
                )

            corgie_logger.info(f"Load masks for {mask_bcube}")
            mask_id = specs["mask_id"]
            mask_layer.binarizer = helpers.Binarizer(["eq", mask_id])
            mask = mask_layer.read(bcube=mask_bcube, mip=self.mip, device=device)
            mask = residuals.res_warp_img(
                mask.float(), field - mask_trans.to_tensor(device=field.device)
            ).tensor()
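            # Warping interpolates the 0/1 mask, so re-threshold to recover booleans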
            mask = (mask > 0.4).bool()
            cropped_mask = helpers.crop(mask, self.pad + render_pad)

            corgie_logger.info(f"Load image for {img_bcube}")
            if cropped_mask.sum() == 0:
                cropped_img = torch.zeros_like(cropped_mask, dtype=torch.float)
            else:
                img_layer = self.src_layers[str(specs["src_img"])]
                img = img_layer.read(bcube=img_bcube, mip=self.mip, device=device)
                img = residuals.res_warp_img(
                    img.float(), field - img_trans.to_tensor(device=field.device)
                )
                cropped_img = helpers.crop(img, self.pad + render_pad)

            # write to composite image
            if k == 0:
                dst_img = cropped_img
                dst_img[~cropped_mask] = 0
            else:
                dst_img[cropped_mask] = cropped_img[cropped_mask]

        self.dst_layer.write(dst_img.cpu(), bcube=self.bcube, mip=self.mip)
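
Because the loop walks `self.src_specs[::-1]`, the first spec in the list is composited last and wins wherever masks overlap. A minimal tensor sketch of that masked-overwrite rule (assuming the same boolean-mask convention as above):

import torch

canvas = torch.zeros(4, 4)
low = torch.full((4, 4), 1.0)   # composited first (lower priority)
high = torch.full((4, 4), 2.0)  # composited last (higher priority)

low_mask = torch.zeros(4, 4, dtype=torch.bool)
low_mask[:, :3] = True
high_mask = torch.zeros(4, 4, dtype=torch.bool)
high_mask[:, 1:] = True

canvas[low_mask] = low[low_mask]     # first iteration fills its masked region
canvas[high_mask] = high[high_mask]  # later iteration overwrites the overlap
print(canvas)  # column 0 keeps 1.0; columns 1-3 end up 2.0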
Code example #29
def align_block(
    ctx,
    src_layer_spec,
    dst_folder,
    vote_dist,
    render_pad,
    render_chunk_xy,
    processor_spec,
    pad,
    crop,
    processor_mip,
    chunk_xy,
    start_coord,
    end_coord,
    coord_mip,
    blend_xy,
    force_chunk_xy,
    suffix,
    copy_start,
    use_starters,
    seethrough_spec,
    seethrough_limit,
    seethrough_spec_mip,
    mode,
    chunk_z=1,
):
    scheduler = ctx.obj["scheduler"]

    if suffix is None:
        suffix = "_aligned"
    else:
        suffix = f"_{suffix}"

    if crop is None:
        crop = pad
    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec, name="src", readonly=True)
    src_stack.folder = dst_folder

    force_chunk_xy = chunk_xy if force_chunk_xy else None
    dst_stack = stack.create_stack_from_reference(
        reference_stack=src_stack,
        folder=dst_folder,
        name="dst",
        types=["img", "mask"],
        readonly=False,
        suffix=suffix,
        force_chunk_xy=force_chunk_xy,
        overwrite=True,
    )

    render_method = helpers.PartialSpecification(
        f=RenderJob,
        pad=render_pad,
        chunk_xy=render_chunk_xy,
        chunk_z=1,
        render_masks=False,
    )

    if seethrough_spec != tuple():
        assert seethrough_spec_mip is not None

        seethrough_method = helpers.PartialSpecification(
            f=SeethroughCompareJob,
            mip=seethrough_spec_mip,
            processor_spec=seethrough_spec,
            chunk_xy=chunk_xy,
            pad=pad,
            crop=pad,
            seethrough_limit=seethrough_limit,
        )
    else:
        seethrough_method = None

    cf_method = helpers.PartialSpecification(
        f=ComputeFieldJob,
        pad=pad,
        crop=crop,
        processor_mip=processor_mip,
        processor_spec=processor_spec,
        chunk_xy=chunk_xy,
        blend_xy=blend_xy,
        chunk_z=1,
    )

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    if mode == "bidirectional":
        z_mid = (bcube.z_range()[1] + bcube.z_range()[0]) // 2
        bcube_back = bcube.reset_coords(ze=z_mid, in_place=False)
        bcube_forv = bcube.reset_coords(zs=z_mid, in_place=False)

        align_block_job_back = AlignBlockJob(
            src_stack=src_stack,
            dst_stack=dst_stack,
            bcube=bcube_back,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=copy_start,
            backward=True,
            vote_dist=vote_dist,
            use_starters=use_starters,
        )
        scheduler.register_job(
            align_block_job_back,
            job_name="Backward Align Block {}".format(bcube),
        )

        align_block_job_forv = AlignBlockJob(
            src_stack=src_stack,
            dst_stack=deepcopy(dst_stack),
            bcube=bcube_forv,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=True,
            backward=False,
            vote_dist=vote_dist,
            use_starters=use_starters,
        )
        scheduler.register_job(
            align_block_job_forv,
            job_name="Forward Align Block {}".format(bcube),
        )
    else:
        align_block_job = AlignBlockJob(
            src_stack=src_stack,
            dst_stack=dst_stack,
            bcube=bcube,
            render_method=render_method,
            cf_method=cf_method,
            seethrough_method=seethrough_method,
            suffix=suffix,
            copy_start=copy_start,
            backward=mode == "backward",
            vote_dist=vote_dist,
            use_starters=use_starters,
        )

        # register the job; it is executed below
        scheduler.register_job(align_block_job, job_name="Align Block {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = (
        f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. "
        f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}"
    )
    corgie_logger.info(result_report)
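
Bidirectional mode splits the requested z-range at its midpoint and aligns the two halves in opposite directions, so alignment drift accumulates over at most half the block instead of its full length. The split itself is plain integer arithmetic on the bcube's z-range; a worked example with hypothetical coordinates:

zs, ze = 100, 200           # z-range of the requested bcube
z_mid = (ze + zs) // 2      # 150
back_half = (zs, z_mid)     # covered by AlignBlockJob(..., backward=True)
forward_half = (z_mid, ze)  # covered by AlignBlockJob(..., backward=False)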
Code example #30
File: align_block.py Project: scottwedge/corgie
def align_block(ctx, src_layer_spec, tgt_layer_spec, dst_folder, render_pad, render_chunk_xy,
        processor_spec, pad, crop, processor_mip, chunk_xy, start_coord, end_coord, coord_mip,
        suffix, copy_start, mode, chunk_z=1):
    scheduler = ctx.obj['scheduler']

    if suffix is None:
        suffix = '_aligned'
    else:
        suffix = f"_{suffix}"

    if crop is None:
        crop = pad
    corgie_logger.debug("Setting up layers...")
    src_stack = create_stack_from_spec(src_layer_spec,
            name='src', readonly=True)

    tgt_stack = create_stack_from_spec(tgt_layer_spec,
            name='tgt', readonly=True, reference=src_stack)

    dst_stack = stack.create_stack_from_reference(reference_stack=src_stack,
            folder=dst_folder, name="dst", types=["img", "mask"], readonly=False,
            suffix=suffix)

    render_method = helpers.PartialSpecification(
            f=RenderJob,
            pad=render_pad,
            chunk_xy=render_chunk_xy,
            chunk_z=1,
            blackout_masks=False,
            render_masks=True,
            mip=min(processor_mip)
            )

    cf_method = helpers.PartialSpecification(
            f=ComputeFieldJob,
            pad=pad,
            crop=crop,
            processor_mip=processor_mip,
            processor_spec=processor_spec,
            chunk_xy=chunk_xy,
            chunk_z=1
            )

    bcube = get_bcube_from_coords(start_coord, end_coord, coord_mip)

    if mode == 'bidirectional':
        z_mid = (bcube.z_range()[1] + bcube.z_range()[0]) // 2
        bcube_back = bcube.reset_coords(ze=z_mid, in_place=False)
        bcube_forv = bcube.reset_coords(zs=z_mid, in_place=False)

        align_block_job_back = AlignBlockJob(src_stack=src_stack,
                                    tgt_stack=tgt_stack,
                                    dst_stack=dst_stack,
                                    bcube=bcube_back,
                                    render_method=render_method,
                                    cf_method=cf_method,
                                    suffix=suffix,
                                    copy_start=copy_start,
                                    backward=True)
        scheduler.register_job(align_block_job_back, job_name="Backward Align Block {}".format(bcube))

        align_block_job_forv = AlignBlockJob(src_stack=src_stack,
                                    tgt_stack=tgt_stack,
                                    dst_stack=deepcopy(dst_stack),
                                    bcube=bcube_forv,
                                    render_method=render_method,
                                    cf_method=cf_method,
                                    suffix=suffix,
                                    copy_start=True,
                                    backward=False)
        scheduler.register_job(align_block_job_forv, job_name="Forward Align Block {}".format(bcube))
    else:
        align_block_job = AlignBlockJob(src_stack=src_stack,
                                        tgt_stack=tgt_stack,
                                        dst_stack=dst_stack,
                                        bcube=bcube,
                                        render_method=render_method,
                                        cf_method=cf_method,
                                        suffix=suffix,
                                        copy_start=copy_start,
                                        backward=mode=='backward')

        # register the job; it is executed below
        scheduler.register_job(align_block_job, job_name="Align Block {}".format(bcube))

    scheduler.execute_until_completion()
    result_report = f"Aligned layers {[str(l) for l in src_stack.get_layers_of_type('img')]}. " \
            f"Results in {[str(l) for l in dst_stack.get_layers_of_type('img')]}"
    corgie_logger.info(result_report)
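
One detail shared by both versions of `align_block`: the forward job receives `deepcopy(dst_stack)` rather than the same object as the backward job. A plausible reason (an assumption here, since the job internals are not shown) is that each job may mutate its stack while running, and two jobs sharing one stack would step on each other. A toy illustration of the hazard:

from copy import deepcopy

stack_a = {"layers": ["img"]}
stack_b = stack_a               # shared reference: mutations leak across jobs
stack_b["layers"].append("tmp")
print(stack_a["layers"])        # ['img', 'tmp']

stack_c = deepcopy(stack_a)     # independent copy: safe for a second job
stack_c["layers"].append("tmp2")
print(stack_a["layers"])        # still ['img', 'tmp']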