def copy_to_bdv_n5(in_file, out_file, chunks, resolution, n_threads=32, start_scale=0):
    """Copy all scale levels of a bdv hdf5 dataset into a bdv.n5 file.

    :param in_file: path to the input file; read through the hdf5 bdv key
        layout (``get_key(True, ...)``)
    :param out_file: path to the output file; written through the n5 bdv key
        layout (``get_key(False, ...)``)
    :param chunks: chunk shape for the copied datasets; if None, each scale
        level reuses the chunking of the corresponding input dataset
    :param resolution: resolution written to the n5 bdv metadata
    :param n_threads: number of threads passed to ``copy_dataset``
    :param start_scale: first input scale level to copy; output scale levels
        are re-indexed to start at 0
    """
    n_scales = get_number_of_scales(in_file, 0, 0)
    scale_factors = get_scale_factors(in_file, 0)
    # double check newly implemented functions in pybdv
    assert n_scales == len(scale_factors)
    # re-base the factors so that `start_scale` becomes the new scale 0
    scale_factors = normalize_scale_factors(scale_factors, start_scale)
    for out_scale, in_scale in enumerate(range(start_scale, n_scales)):
        in_key = get_key(True, 0, 0, in_scale)
        out_key = get_key(False, 0, 0, out_scale)
        # fall back to the input dataset's own chunking if none was given
        if chunks is None:
            with open_file(in_file, 'r') as f:
                chunks_ = f[in_key].chunks
        else:
            chunks_ = chunks
        copy_dataset(in_file, in_key, out_file, out_key,
                     convert_dtype=False, chunks=chunks_, n_threads=n_threads)
        copy_attributes(in_file, in_key, out_file, out_key)
    # NOTE(review): assumed to run once after the copy loop (metadata is
    # written for the whole pyramid); original indentation was lost in the
    # collapsed formatting — confirm the loop extent
    write_n5_metadata(out_file, scale_factors, resolution, setup_id=0)
def get_ds_factors():
    """Return the per-level downsampling factors of the whole-raw volume.

    The first entry is the (absolute) factor of scale level 1; every further
    entry is the factor of a level relative to the level before it.
    """
    raw_path = '../data/rawdata/sbem-6dpf-1-whole-raw.h5'
    # drop scale 0, which is the full-resolution level
    absolute = get_scale_factors(raw_path, 0)[1:]
    relative = [[int(f) for f in absolute[0]]]
    # walk consecutive pairs and divide element-wise
    for prev, curr in zip(absolute, absolute[1:]):
        relative.append([int(c / p) for c, p in zip(curr, prev)])
    return relative
def _check(exp_data, exp_sf, exp_attrs, exp_affine):
    # NOTE(review): this function reads `self` without taking it as a
    # parameter — presumably it is a closure nested inside a test method;
    # the nesting was lost in the collapsed formatting, confirm.
    # Verify the written bdv data and metadata against expected values:
    # pixel data, relative scale factors, xml attributes and the affine.
    key = get_key(self.is_h5, timepoint=0, setup_id=0, scale=0)
    with open_file(self.out_path, 'r') as f:
        data = f[key][:]
    self.assertTrue(np.allclose(data, exp_data))
    sf = get_scale_factors(self.out_path, setup_id=0)
    sf = absolute_to_relative_scale_factors(sf)
    # scale level 0 always has the relative factor [1, 1, 1]
    self.assertEqual(sf, [[1, 1, 1]] + exp_sf)
    attrs = get_attributes(self.xml_path, setup_id=0)
    self.assertEqual(attrs, exp_attrs)
    affine = get_affine(self.xml_path, setup_id=0, timepoint=0)['affine0']
    # loose tolerance: affine values are round-tripped through xml text
    self.assertTrue(np.allclose(np.array(affine), np.array(exp_affine), atol=1e-4))
def update_shell():
    """Convert the shell segmentation tiff into a bdv.n5 volume.

    Reads the resolution from the resin reference xml and writes the shell
    segmentation with a fixed downscaling pyramid. Writes
    ``sbem-6dpf-1-whole-segmented-shell.n5`` into the working directory.
    """
    p_tiff = '../../EM-Prospr/shell_seg.tiff'
    p_res_xml = '../data/rawdata/sbem-6dpf-1-whole-segmented-resin.xml'
    resolution = get_resolution(p_res_xml, 0)
    # Fix: the original also read the resin volume's scale factors and then
    # immediately overwrote them with this hard-coded list; the dead read
    # (and its unused path variable) has been removed.
    scale_factors = [[2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
    print(scale_factors)
    print(resolution)
    print("Load tiff ...")
    shell = np.asarray(imageio.volread(p_tiff))
    print(shell.shape)
    print("Write bdv")
    out_path = 'sbem-6dpf-1-whole-segmented-shell.n5'
    # convert_dtype=False keeps the label dtype instead of casting for bdv
    make_bdv(shell, out_path,
             downscale_factors=scale_factors,
             resolution=resolution, unit='micrometer',
             n_threads=8, chunks=(96,) * 3,
             convert_dtype=False)
def test_get_scale_factors(self):
    """The absolute scale factors read back must match those used for writing."""
    from pybdv.util import get_scale_factors
    result = get_scale_factors(self.out_path, 0)
    expected = self.abs_scale_factors
    self.assertEqual(result, expected)
def add_bdv_image(xml_path, root, dataset_name, image_name=None, file_format="bdv.n5",
                  menu_name=None, scale_factors=None, tmp_folder=None, target="local",
                  max_jobs=multiprocessing.cpu_count(), is_default_dataset=False,
                  description=None, trafos_for_mobie=None, move_data=False, int_to_uint=False):
    """Add the image(s) specified in an bdv xml file and copy the metadata.

    :param xml_path: path to the bdv xml describing the input data
    :param root: project root folder, passed through to ``add_image``
    :param dataset_name: name of the target dataset
    :param image_name: name(s) for the added image(s); a single string or a
        list with one entry per setup id; if None, the names stored in the
        xml are used
    :param file_format: the target file format
    :param scale_factors: relative downscaling factors; if None they are
        derived from the input data
    :param move_data: if True, move the data instead of copying; only done
        when input and target format match and there is a single setup
    :param int_to_uint: passed through to ``add_image``

    Only data with a single timepoint is supported.
    """
    # find how many timepoints we have
    t_start, t_stop = bdv_metadata.get_time_range(xml_path)
    if t_stop > t_start:
        raise NotImplementedError("Only a single timepoint is currently supported.")
    # get the setup ids and check that image_name is compatible
    setup_ids = bdv_metadata.get_setup_ids(xml_path)
    if image_name is None:
        image_name = [None] * len(setup_ids)
    else:
        # normalize a single name to a list so it zips with the setup ids
        if isinstance(image_name, str):
            image_name = [image_name]
        assert len(image_name) == len(setup_ids)
    data_path = bdv_metadata.get_data_path(xml_path, return_absolute_path=True)
    # get the key for the input data format
    input_format = bdv_metadata.get_bdv_format(xml_path)
    # moving (instead of converting) is only possible if the formats match
    # and there is exactly one setup
    move_only = False
    if move_data:
        if input_format == file_format:
            move_only = True
        else:
            print(
                "Different input format than target format. Will convert data instead of moving it."
            )
        if len(setup_ids) > 1:
            move_only = False
            print(
                "Cannot move XML with multiple setups. Will convert data instead of moving it."
            )
    for setup_id, name in zip(setup_ids, image_name):
        input_key = get_key(input_format == "bdv.hdf5", timepoint=t_start,
                            setup_id=setup_id, scale=0)
        # get the resolution, scale_factors, chunks and unit
        resolution = bdv_metadata.get_resolution(xml_path, setup_id)
        if scale_factors is None:
            # NOTE(review): this rebinds the outer `scale_factors`, so with
            # multiple setups every setup after the first reuses the factors
            # read for the first setup — confirm this is intended
            scale_factors = get_scale_factors(data_path, setup_id)
            scale_factors = absolute_to_relative_scale_factors(
                scale_factors)[1:]
        with open_file(data_path, "r") as f:
            chunks = f[input_key].chunks
        unit = bdv_metadata.get_unit(xml_path, setup_id)
        # get the name of this source
        if name is None:
            name = bdv_metadata.get_name(xml_path, setup_id)
        # get the view (=MoBIE metadata) and transformation (=bdv metadata)
        # from the input bdv metadata
        view, transformation = _view_and_trafo_from_xml(
            xml_path, setup_id, t_start, name, menu_name, trafos_for_mobie)
        # use a per-source tmp folder so parallel setups do not collide
        tmp_folder_ = None if tmp_folder is None else f"{tmp_folder}_{name}"
        add_image(data_path, input_key, root, dataset_name,
                  image_name=name,
                  resolution=resolution,
                  scale_factors=scale_factors,
                  chunks=chunks,
                  file_format=file_format,
                  menu_name=menu_name,
                  tmp_folder=tmp_folder_,
                  target=target,
                  max_jobs=max_jobs,
                  unit=unit,
                  view=view,
                  transformation=transformation,
                  is_default_dataset=is_default_dataset,
                  description=description,
                  move_only=move_only,
                  int_to_uint=int_to_uint)
def load_scale_factors(scale):
    """Return the absolute scale factors of the lgn raw volume for one level.

    :param scale: index of the scale level to look up
    """
    raw_path = '/g/rompani/lgn-em-datasets/data/0.0.0/images/local/sbem-adult-1-lgn-raw.n5'
    return get_scale_factors(raw_path, setup_id=0)[scale]