Example #1
def export_meshes(xml_path,
                  table_path,
                  cell_ids,
                  out_folder,
                  scale,
                  resolution=None,
                  n_jobs=16):
    os.makedirs(out_folder, exist_ok=True)

    # if no resolution is passed, read it from the xml metadata and
    # adjust it for the chosen scale level (factor 2 downscaling per level)
    if resolution is None:
        resolution = get_resolution(xml_path, 0)
        if scale > 0:
            resolution = [re * 2**scale for re in resolution]

    # load the segmentation dataset
    path = get_data_path(xml_path, return_absolute_path=True)
    key = 'setup0/timepoint0/s%i' % scale
    f = z5py.File(path, 'r')
    ds = f[key]
    ds.n_threads = 8

    # load the default table to get the bounding boxes
    if table_path is None:
        bb_starts, bb_stops = None, None
    else:
        bb_starts, bb_stops = load_bounding_boxes(table_path, resolution)

    def _mesh(cell_id):
        out_path = os.path.join(out_folder, 'mesh_%i.obj' % cell_id)
        export_mesh(cell_id, ds, bb_starts, bb_stops, resolution, out_path)

    print("Computing meshes ...")
    with futures.ThreadPoolExecutor(n_jobs) as tp:
        list(tqdm(tp.map(_mesh, cell_ids), total=len(cell_ids)))
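
# A minimal usage sketch for export_meshes; all paths and cell ids below are
# hypothetical placeholders. Meshes are written to out_folder as mesh_<id>.obj.
export_meshes(xml_path='data/raw.xml',        # hypothetical bdv xml
              table_path='data/default.csv',  # hypothetical default table
              cell_ids=[1001, 1002, 1003],    # hypothetical segmentation ids
              out_folder='./meshes',
              scale=2)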
Example #2
def extract_defects(view=False):
    resolution = get_resolution(XML_PATH, setup_id=0)

    defect_raw = []
    defect_id = 0
    for coord, offsets in zip(COORDINATES, OFFSETS):
        # coordinates are stored in xyz order; reverse to zyx
        coord = coord[::-1]
        # convert from physical coordinates to voxel coordinates
        coord = [int(co / re) for co, re in zip(coord, resolution)]
        for off in offsets:
            # bounding box: a single z slice at the given offset,
            # with a halo around the center position in yx
            bb = (slice(coord[0] + off, coord[0] + off + 1), ) + tuple(
                slice(co - ha, co + ha) for co, ha in zip(coord[1:], HALO))
            with z5py.File(PATH, 'r') as f:
                ds = f['setup0/timepoint0/s0']
                raw = ds[bb]

            if view:
                with napari.gui_qt():
                    viewer = napari.Viewer()
                    viewer.add_image(raw)
            # collect the raw data for every defect section; appending inside
            # this loop makes sure all offsets are kept, not just the last one
            defect_raw.append(raw)
            defect_id += 1

    defect_raw = np.concatenate(defect_raw, axis=0)
    out_path = '/g/kreshuk/pape/Work/data/rompani/neuron_training_data/defects.h5'
    with h5py.File(out_path, 'a') as f:
        f.create_dataset('defect_sections/raw',
                         data=defect_raw,
                         compression='gzip')
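
# A small sketch of the coordinate handling above, with hypothetical values:
# a physical xyz coordinate is reversed to zyx and divided by the per-axis
# resolution to obtain voxel indices.
coord_xyz = [100.0, 80.0, 40.0]        # hypothetical physical coordinate (xyz)
resolution_zyx = [0.025, 0.01, 0.01]   # hypothetical resolution (zyx)
coord_zyx = coord_xyz[::-1]
voxel_coord = [int(co / re) for co, re in zip(coord_zyx, resolution_zyx)]
# voxel_coord == [1600, 8000, 10000]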
Example #3
def make_n5_files(version):
    version_folder = os.path.join(ROOT, version)

    # default chunk size
    default_chunks = 3 * (128, )
    # special chunk sizes
    chunk_dict = {'sbem-6dpf-1-whole-raw': None}  # don't copy raw yet

    copied = []

    xmls = glob(os.path.join(version_folder, 'images', 'local', '*.xml'))
    for xml in xmls:
        name = os.path.splitext(os.path.split(xml)[1])[0]
        chunks = chunk_dict.get(name, default_chunks)
        # chunks None means we skip copying for now
        if chunks is None:
            continue

        h5_path = get_data_path(xml, return_absolute_path=True)
        n5_path = os.path.splitext(h5_path)[0] + '.n5'
        # record the h5 path, but skip the conversion if the n5 file exists already
        copied.append(h5_path)
        if os.path.exists(n5_path):
            continue

        # load resolution from xml
        resolution = get_resolution(xml, 0)
        copy_to_bdv_n5(h5_path, n5_path, chunks, resolution)

    return copied
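
# A usage sketch with a hypothetical version string; make_n5_files returns the
# h5 paths for which an n5 counterpart now exists.
copied_paths = make_n5_files('0.6.5')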
Example #4
def get_bounding_box(bid, halo):
    resolution = get_resolution(XML_PATH, setup_id=0)

    coord = COORDINATES[bid][::-1]
    coord = [int(co / re) for co, re in zip(coord, resolution)]

    bb = tuple(slice(co - ha, co + ha) for co, ha in zip(coord, halo))
    return bb
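
# A sketch of how the bounding box might be used to read a raw block, assuming
# the same PATH constant as in the extract_defects example; the block id and
# halo values are hypothetical.
bb = get_bounding_box(bid=0, halo=[50, 512, 512])
with z5py.File(PATH, 'r') as f:
    raw = f['setup0/timepoint0/s0'][bb]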
Example #5
def get_resolution(source_metadata, dataset_folder):
    data_format, image_metadata = _load_image_metadata(source_metadata,
                                                       dataset_folder)
    if data_format.startswith("bdv"):
        resolution = bdv_metadata.get_resolution(image_metadata, setup_id=0)
    elif data_format.startswith("ome.zarr"):
        # the resolution corresponds to the scale transformation of the
        # first (full-resolution) dataset in the multiscales metadata
        transforms = image_metadata["datasets"][0]["coordinateTransformations"]
        resolution = [1.0, 1.0, 1.0]
        for trafo in transforms:
            if trafo["type"] == "scale":
                resolution = trafo["scale"]
    else:
        raise ValueError(f"Unsupported data format {data_format}")
    return resolution
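
# For the ome.zarr branch above, the image metadata is expected to follow the
# OME-Zarr multiscales layout; a minimal hypothetical example of the structure
# this function parses:
image_metadata_example = {
    "datasets": [
        {"path": "s0",
         "coordinateTransformations": [
             {"type": "scale", "scale": [0.025, 0.01, 0.01]}
         ]}
    ]
}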
Example #6
def update_shell():
    p_tiff = '../../EM-Prospr/shell_seg.tiff'
    p_res_n5 = '../data/rawdata/sbem-6dpf-1-whole-segmented-resin.n5'
    p_res_xml = '../data/rawdata/sbem-6dpf-1-whole-segmented-resin.xml'

    scale_factors = get_scale_factors(p_res_n5, 0)[1:]
    resolution = get_resolution(p_res_xml, 0)

    # override the scale factors read from the n5 file with hard-coded ones
    scale_factors = [[2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2]]

    print(scale_factors)
    print(resolution)

    print("Load tiff ...")
    shell = np.asarray(imageio.volread(p_tiff))
    print(shell.shape)

    print("Write bdv")
    out_path = 'sbem-6dpf-1-whole-segmented-shell.n5'

    make_bdv(shell, out_path, downscale_factors=scale_factors, resolution=resolution, unit='micrometer',
             n_threads=8, chunks=(96,) * 3, convert_dtype=False)
Example #7
def update_transformation_parameter(xml_path, parameter):
    if isinstance(parameter, (list, np.ndarray)):
        if len(parameter) != 12:
            raise ValueError(
                f"Expected affine transformation with 12 parameters, got {len(parameter)}"
            )
    elif isinstance(parameter, dict):
        if any(len(param) != 12 for param in parameter.values()):
            raise ValueError(
                "Expected all affine transformations to have 12 parameters.")
    else:
        raise ValueError(f"Invalid affine transformation {parameter}")
    resolution = get_resolution(xml_path, setup_id=0)
    if np.prod(resolution) != 1:
        warnings.warn(
            f"The xml file at {xml_path} has the resolution {resolution}. "
            "The corresponding transformation will be over-written; "
            "make sure to factor it in with the transformation you have specified."
        )
    write_affine(xml_path,
                 setup_id=0,
                 affine=parameter,
                 overwrite=True,
                 timepoint=0)
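
# A usage sketch: bdv stores affine transformations as the first three rows of
# a 4x4 affine matrix, flattened row-wise to 12 values. The identity transform
# looks like this (the xml path is a hypothetical placeholder):
identity = [1.0, 0.0, 0.0, 0.0,
            0.0, 1.0, 0.0, 0.0,
            0.0, 0.0, 1.0, 0.0]
update_transformation_parameter('data/raw.xml', identity)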
Example #8
def add_bdv_image(xml_path,
                  root,
                  dataset_name,
                  image_name=None,
                  file_format="bdv.n5",
                  menu_name=None,
                  scale_factors=None,
                  tmp_folder=None,
                  target="local",
                  max_jobs=multiprocessing.cpu_count(),
                  is_default_dataset=False,
                  description=None,
                  trafos_for_mobie=None,
                  move_data=False,
                  int_to_uint=False):
    """Add the image(s) specified in an bdv xml file and copy the metadata.
    """
    # find how many timepoints we have
    t_start, t_stop = bdv_metadata.get_time_range(xml_path)
    if t_stop > t_start:
        raise NotImplementedError(
            "Only a single timepoint is currently supported.")

    # get the setup ids and check that image_name is compatible
    setup_ids = bdv_metadata.get_setup_ids(xml_path)

    if image_name is None:
        image_name = [None] * len(setup_ids)
    elif isinstance(image_name, str):
        image_name = [image_name]

    assert len(image_name) == len(setup_ids)

    data_path = bdv_metadata.get_data_path(xml_path, return_absolute_path=True)

    # get the key for the input data format
    input_format = bdv_metadata.get_bdv_format(xml_path)

    move_only = False
    if move_data:
        if input_format == file_format:
            move_only = True
        else:
            print(
                "Different input format than target format. Will convert data instead of moving it."
            )

        if len(setup_ids) > 1:
            move_only = False
            print(
                "Cannot move XML with multiple setups. Will convert data instead of moving it."
            )

    for setup_id, name in zip(setup_ids, image_name):
        input_key = get_key(input_format == "bdv.hdf5",
                            timepoint=t_start,
                            setup_id=setup_id,
                            scale=0)

        # get the resolution, scale_factors, chunks and unit
        resolution = bdv_metadata.get_resolution(xml_path, setup_id)
        if scale_factors is None:
            scale_factors = get_scale_factors(data_path, setup_id)
            scale_factors = absolute_to_relative_scale_factors(
                scale_factors)[1:]
        with open_file(data_path, "r") as f:
            chunks = f[input_key].chunks
        unit = bdv_metadata.get_unit(xml_path, setup_id)

        # get the name of this source
        if name is None:
            name = bdv_metadata.get_name(xml_path, setup_id)

        # get the view (=MoBIE metadata) and transformation (=bdv metadata)
        # from the input bdv metadata
        view, transformation = _view_and_trafo_from_xml(
            xml_path, setup_id, t_start, name, menu_name, trafos_for_mobie)

        tmp_folder_ = None if tmp_folder is None else f"{tmp_folder}_{name}"
        add_image(data_path,
                  input_key,
                  root,
                  dataset_name,
                  image_name=name,
                  resolution=resolution,
                  scale_factors=scale_factors,
                  chunks=chunks,
                  file_format=file_format,
                  menu_name=menu_name,
                  tmp_folder=tmp_folder_,
                  target=target,
                  max_jobs=max_jobs,
                  unit=unit,
                  view=view,
                  transformation=transformation,
                  is_default_dataset=is_default_dataset,
                  description=description,
                  move_only=move_only,
                  int_to_uint=int_to_uint)
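
# A usage sketch with hypothetical arguments: add the image from a bdv xml
# file to the MoBIE project at <root>/<dataset_name>.
add_bdv_image(xml_path='data/raw.xml',
              root='./mobie-project',
              dataset_name='my-dataset',
              menu_name='em-raw')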
Example #9
def load_resolution(scale=0):
    xml = '/g/rompani/lgn-em-datasets/data/0.0.0/images/local/sbem-adult-1-lgn-raw.xml'
    scale_factor = load_scale_factors(scale)
    res = get_resolution(xml, setup_id=0)
    res = [re * sf for re, sf in zip(res, scale_factor)]
    return res