def convert_tif_to_hdf(input_path, output_path, key_path="entry/data",
                       crop=(0, 0, 0, 0), pattern=None, **options):
    """
    Convert a folder of tif files to a hdf/nxs file.

    Parameters
    ----------
    input_path : str
        Folder path to the tif files.
    output_path : str
        Path to the hdf/nxs file.
    key_path : str, optional
        Key path to the dataset.
    crop : tuple of int, optional
        Crop the images from the edges, i.e.
        crop = (crop_top, crop_bottom, crop_left, crop_right).
    pattern : str, optional
        Used to find tif files with names matching the pattern.
    options : dict, optional
        Add metadata. E.g. options={"entry/angles": angles,
        "entry/energy": 53}.

    Returns
    -------
    str
        Path to the hdf/nxs file.
    """
    if pattern is None:
        list_file = losa.find_file(input_path + "/*.tif*")
    else:
        list_file = losa.find_file(input_path + "/*" + pattern + "*.tif*")
    depth = len(list_file)
    if depth == 0:
        raise ValueError("No tif files in the folder: {}".format(input_path))
    (height, width) = np.shape(losa.load_image(list_file[0]))
    file_base, file_ext = os.path.splitext(output_path)
    if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"):
        file_ext = '.hdf'
    output_path = file_base + file_ext
    cr_top, cr_bottom, cr_left, cr_right = crop
    cr_height = height - cr_top - cr_bottom
    cr_width = width - cr_left - cr_right
    data_out = losa.open_hdf_stream(output_path, (depth, cr_height, cr_width),
                                    key_path=key_path, overwrite=True,
                                    **options)
    for i, fname in enumerate(list_file):
        data_out[i] = losa.load_image(fname)[cr_top:cr_height + cr_top,
                                             cr_left:cr_width + cr_left]
    return output_path
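
# Usage sketch (illustration only; the folder, output path, and angle array
# below are hypothetical). Metadata is attached through "options", following
# the docstring example above.
angles = np.linspace(0.0, 180.0, 1801)  # assumed rotation angles (degrees)
hdf_file = convert_tif_to_hdf("/data/scan_00001/projections",
                              "/data/scan_00001/projections.hdf",
                              key_path="entry/data", crop=(10, 10, 0, 0),
                              options={"entry/angles": angles,
                                       "entry/energy": 53})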
        result_tmp = []
        for slice_idx in range(slice_start, slice_stop):
            pos = np.squeeze(np.where(list1 == slice_idx)[0])
            if pos.size == 1:
                result_tmp.append(pos)
        if len(result_tmp) > 0:
            result_tmp = np.asarray(result_tmp)
            results.append([i, result_tmp[0], result_tmp[-1]])
    return results


print("!!! Start !!!")
time_start = timeit.default_timer()
# Open hdf_stream for saving data.
hdf_stream = losa.open_hdf_stream(output_base + "/" + output_file,
                                  (dsp_slice, dsp_height, dsp_width),
                                  key_path)
list_idx_nslice = np.reshape(np.arange(total_slice_r),
                             (dsp_slice, cube[0]))
dsp_method = np.mean  # Use mean for downsampling
for idx in np.arange(dsp_slice):
    slice_start = list_idx_nslice[idx, 0]
    slice_stop = list_idx_nslice[idx, -1] + 1
    slices = locate_slice_chunk(slice_start, slice_stop, list_slices)
    if len(slices) == 1:
        data_chunk = list_hdf_object[slices[0][0]][
                     slices[0][1]:slices[0][2] + 1, :height_r, :width_r]
    else:
        data_chunk1 = list_hdf_object[slices[0][0]][
                      slices[0][1]:slices[0][2] + 1, :height_r, :width_r]
        data_chunk2 = list_hdf_object[slices[1][0]][
                      slices[1][1]:slices[1][2] + 1, :height_r, :width_r]
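        # A plausible continuation (a sketch, not from the original script):
        # the two chunks come from adjacent hdf files, so stack them along
        # the slice axis before downsampling.
        data_chunk = np.concatenate((data_chunk1, data_chunk2), axis=0)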
    def test_open_hdf_stream(self):
        data_out = losa.open_hdf_stream("data/data.hdf", (64, 64))
        self.assertTrue(isinstance(data_out, object))
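
    # A possible follow-up test (a sketch; it assumes the object returned by
    # open_hdf_stream is a writable h5py dataset, that numpy is imported as
    # np in this test module, and that "data/data.hdf" is writable in the
    # test environment):
    def test_open_hdf_stream_write(self):
        data_out = losa.open_hdf_stream("data/data.hdf", (64, 64))
        data_out[0] = np.ones(64)  # write one row through the stream
        self.assertEqual(float(data_out[0, 0]), 1.0)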
def rescale_dataset(input_, output, nbit=16, minmax=None, skip=None,
                    key_path=None):
    """
    Rescale a dataset to 8-bit or 16-bit data-type. The dataset can be a
    folder of tif files, a hdf file, or a 3D array.

    Parameters
    ----------
    input_ : str, array_like
        It can be a folder path to tif files, a hdf file, or 3D array.
    output : str, None
        It can be a folder path, a hdf file path, or None (memory consuming).
    nbit : {8,16}
        Rescaled data-type: 8-bit or 16-bit.
    minmax : tuple of float, or None
        Minimum and maximum values used for rescaling. They are calculated
        if None is given.
    skip : int or None
        Skipping step of reading input used for getting statistical
        information.
    key_path : str, optional
        Key path to the dataset if the input is a hdf file.

    Returns
    -------
    array_like or None
        If output is None, return a 3D array.
    """
    if output is not None:
        file_base, file_ext = os.path.splitext(output)
        if file_ext != "":
            file_base = os.path.dirname(output)
        if os.path.exists(file_base):
            raise ValueError("Folder exists!!! Please choose another path!!!")
    if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""):
        # Input is a folder of tif files.
        list_file = losa.find_file(input_ + "/*.tif*")
        depth = len(list_file)
        if depth == 0:
            raise ValueError("No tif files in the folder: {}".format(input_))
        if minmax is None:
            if skip is None:
                skip = int(np.ceil(0.15 * depth))
            (gmin, gmax) = get_statical_information_dataset(input_,
                                                            skip=skip)[0:2]
        else:
            (gmin, gmax) = minmax
        if output is not None:
            file_base, file_ext = os.path.splitext(output)
            if file_ext != "":
                if not (file_ext == '.hdf' or file_ext == '.h5'
                        or file_ext == ".nxs"):
                    raise ValueError("File extension must be hdf, h5, or nxs")
                output = file_base + file_ext
                (height, width) = np.shape(losa.load_image(list_file[0]))
                if nbit == 8:
                    data_type = "uint8"
                else:
                    data_type = "uint16"
                data_out = losa.open_hdf_stream(output,
                                                (depth, height, width),
                                                key_path="rescale/data",
                                                data_type=data_type,
                                                overwrite=False)
        data_res = []
        for i in range(0, depth):
            mat = rescale(losa.load_image(list_file[i]), nbit=nbit,
                          minmax=(gmin, gmax))
            if output is None:
                data_res.append(mat)
            else:
                file_base, file_ext = os.path.splitext(output)
                if file_ext == "":
                    out_name = "0000" + str(i)
                    losa.save_image(
                        output + "/img_" + out_name[-5:] + ".tif", mat)
                else:
                    data_out[i] = mat
    else:
        # Input is a hdf file or a 3D array.
        if isinstance(input_, str):
            file_ext = os.path.splitext(input_)[-1]
            if not (file_ext == '.hdf' or file_ext == '.h5'
                    or file_ext == ".nxs"):
                raise ValueError(
                    "Can't open this type of file format {}".format(file_ext))
            if key_path is None:
                raise ValueError(
                    "Please provide the key path to the dataset!!!")
            input_ = losa.load_hdf(input_, key_path)
        (depth, height, width) = input_.shape
        if minmax is None:
            if skip is None:
                skip = int(np.ceil(0.15 * depth))
            (gmin, gmax) = get_statical_information_dataset(
                input_, skip=skip, key_path=key_path)[0:2]
        else:
            (gmin, gmax) = minmax
        data_res = []
        if output is not None:
            file_base, file_ext = os.path.splitext(output)
            if file_ext != "":
                if not (file_ext == '.hdf' or file_ext == '.h5'
                        or file_ext == ".nxs"):
                    raise ValueError("File extension must be hdf, h5, or nxs")
                output = file_base + file_ext
                if nbit == 8:
                    data_type = "uint8"
                else:
                    data_type = "uint16"
                data_out = losa.open_hdf_stream(output,
                                                (depth, height, width),
                                                key_path="rescale/data",
                                                data_type=data_type,
                                                overwrite=False)
        for i in range(0, depth):
            mat = rescale(input_[i], nbit=nbit, minmax=(gmin, gmax))
            if output is None:
                data_res.append(mat)
            else:
                file_base, file_ext = os.path.splitext(output)
                if file_ext != "":
                    data_out[i] = mat
                else:
                    out_name = "0000" + str(i)
                    losa.save_image(
                        output + "/img_" + out_name[-5:] + ".tif", mat)
    if output is None:
        return np.asarray(data_res)
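
# Usage sketch (illustration only; the paths below are hypothetical).
# Intensities are mapped linearly from (gmin, gmax) to [0, 2**nbit - 1],
# here converting a float reconstruction in a hdf file to 16-bit tifs:
rescale_dataset("/data/recon/recon.hdf",      # hypothetical input file
                "/data/recon/rescaled_tifs",  # hypothetical output folder
                nbit=16, skip=10, key_path="entry/data")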
def downsample_dataset(input_, output, cell_size, method="mean",
                       key_path=None):
    """
    Downsample a dataset. This can be a folder of tif files, a hdf file,
    or a 3D array.

    Parameters
    ----------
    input_ : str, array_like
        It can be a folder path to tif files, a hdf file, or 3D array.
    output : str, None
        It can be a folder path, a hdf file path, or None (memory consuming).
    cell_size : int or tuple of int
        Window size along axes used for grouping pixels.
    method : {"mean", "median", "max", "min"}
        Downsampling method.
    key_path : str, optional
        Key path to the dataset if the input is a hdf file.

    Returns
    -------
    array_like or None
        If output is None, return a 3D array.
    """
    if output is not None:
        file_base, file_ext = os.path.splitext(output)
        if file_ext != "":
            file_base = os.path.dirname(output)
        if os.path.exists(file_base):
            raise ValueError("Folder exists!!! Please choose another path!!!")
    if method == "median":
        dsp_method = np.median
    elif method == "max":
        dsp_method = np.max
    elif method == "min":
        dsp_method = np.amin
    else:
        dsp_method = np.mean
    if isinstance(cell_size, int):
        cell_size = (cell_size, cell_size, cell_size)
    if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""):
        # Input is a folder of tif files.
        list_file = losa.find_file(input_ + "/*.tif*")
        depth = len(list_file)
        if depth == 0:
            raise ValueError("No tif files in the folder: {}".format(input_))
        (height, width) = np.shape(losa.load_image(list_file[0]))
        depth_dsp = depth // cell_size[0]
        height_dsp = height // cell_size[1]
        width_dsp = width // cell_size[2]
        num = 0
        if (depth_dsp != 0) and (height_dsp != 0) and (width_dsp != 0):
            if output is not None:
                file_base, file_ext = os.path.splitext(output)
                if file_ext != "":
                    if not (file_ext == '.hdf' or file_ext == '.h5'
                            or file_ext == ".nxs"):
                        raise ValueError(
                            "File extension must be hdf, h5, or nxs")
                    output = file_base + file_ext
                    data_out = losa.open_hdf_stream(
                        output, (depth_dsp, height_dsp, width_dsp),
                        key_path="downsample/data", overwrite=False)
            data_dsp = []
            for i in range(0, depth, cell_size[0]):
                if (i + cell_size[0]) > depth:
                    break
                else:
                    mat = []
                    for j in range(i, i + cell_size[0]):
                        mat.append(losa.load_image(list_file[j]))
                    mat = np.asarray(mat)
                    mat = mat[:, :height_dsp * cell_size[1],
                              :width_dsp * cell_size[2]]
                    mat = mat.reshape(1, cell_size[0], height_dsp,
                                      cell_size[1], width_dsp, cell_size[2])
                    mat_dsp = dsp_method(
                        dsp_method(dsp_method(mat, axis=-1), axis=1), axis=2)
                    if output is None:
                        data_dsp.append(mat_dsp[0])
                    else:
                        if file_ext == "":
                            out_name = "0000" + str(num)
                            losa.save_image(output + "/img_" + out_name[-5:]
                                            + ".tif", mat_dsp[0])
                        else:
                            data_out[num] = mat_dsp[0]
                    num += 1
        else:
            raise ValueError("Incorrect cell size {}".format(cell_size))
    else:
        # Input is a hdf file or a 3D array.
        if isinstance(input_, str):
            file_ext = os.path.splitext(input_)[-1]
            if not (file_ext == '.hdf' or file_ext == '.h5'
                    or file_ext == ".nxs"):
                raise ValueError(
                    "Can't open this type of file format {}".format(file_ext))
            if key_path is None:
                raise ValueError(
                    "Please provide the key path to the dataset!!!")
            input_ = losa.load_hdf(input_, key_path)
        (depth, height, width) = input_.shape
        depth_dsp = depth // cell_size[0]
        height_dsp = height // cell_size[1]
        width_dsp = width // cell_size[2]
        if (depth_dsp != 0) and (height_dsp != 0) and (width_dsp != 0):
            if output is None:
                input_ = input_[:depth_dsp * cell_size[0],
                                :height_dsp * cell_size[1],
                                :width_dsp * cell_size[2]]
                input_ = input_.reshape(depth_dsp, cell_size[0], height_dsp,
                                        cell_size[1], width_dsp, cell_size[2])
                data_dsp = dsp_method(
                    dsp_method(dsp_method(input_, axis=-1), axis=1), axis=2)
            else:
                file_base, file_ext = os.path.splitext(output)
                if file_ext != "":
                    if not (file_ext == '.hdf' or file_ext == '.h5'
                            or file_ext == ".nxs"):
                        raise ValueError(
                            "File extension must be hdf, h5, or nxs")
                    output = file_base + file_ext
                    data_out = losa.open_hdf_stream(
                        output, (depth_dsp, height_dsp, width_dsp),
                        key_path="downsample/data", overwrite=False)
                num = 0
                for i in range(0, depth, cell_size[0]):
                    if (i + cell_size[0]) > depth:
                        break
                    else:
                        mat = input_[i:i + cell_size[0],
                                     :height_dsp * cell_size[1],
                                     :width_dsp * cell_size[2]]
                        mat = mat.reshape(1, cell_size[0], height_dsp,
                                          cell_size[1], width_dsp,
                                          cell_size[2])
                        mat_dsp = dsp_method(
                            dsp_method(dsp_method(mat, axis=-1), axis=1),
                            axis=2)
                        if file_ext != "":
                            data_out[num] = mat_dsp[0]
                        else:
                            out_name = "0000" + str(num)
                            losa.save_image(output + "/img_" + out_name[-5:]
                                            + ".tif", mat_dsp[0])
                        num += 1
        else:
            raise ValueError("Incorrect cell size {}".format(cell_size))
    if output is None:
        return np.asarray(data_dsp)
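
# Usage sketch (illustration only; the paths below are hypothetical):
# 2x2x2 binning using the median of each cell, saved to a hdf file.
downsample_dataset("/data/recon/rescaled_tifs",  # hypothetical tif folder
                   "/data/recon/recon_dsp.hdf",  # hypothetical output file
                   cell_size=2, method="median")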
# print(sam_shifts)

# Align image stacks
(ref_stack_cr, sam_stack_cr) = f_alias4(ref_stack, sam_stack, sr_shifts,
                                        sam_shifts)
# Save the aligned images so the results can be checked
for i in range(num_use):
    name = ("0000" + str(i))[-5:]
    losa.save_image(output_base + "/aligned/ref/img_" + name + ".tif",
                    ref_stack_cr[i])
    losa.save_image(output_base + "/aligned/sam/img_" + name + ".tif",
                    sam_stack_cr[i])

# Open hdf streams to save data
phase_hdf = losa.open_hdf_stream(output_base + "/phase.hdf",
                                 (num_proj, height1, width1),
                                 key_path="entry/data")
if get_trans_dark_signal:
    trans_hdf = losa.open_hdf_stream(output_base + "/transmission.hdf",
                                     (num_proj, height1, width1),
                                     key_path="entry/data")
    dark_hdf = losa.open_hdf_stream(output_base + "/dark_signal.hdf",
                                    (num_proj, height1, width1),
                                    key_path="entry/data")

# Assign aliases to functions for convenient use
f_alias5 = losa.get_reference_sample_stacks
f_alias6 = ps.retrieve_phase_based_speckle_tracking
f_alias7 = ps.get_transmission_dark_field_signal
t0 = timeit.default_timer()
for i in range(num_proj):
# Note: we don't apply MTF deconvolution to dark-noise images.
dark_field = corr.unwarp_projection(dark_field, xcenter, ycenter, list_fact)

# Calculate parameters for looping.
start_image = proj_idx[0]
stop_image = proj_idx[-1] + 1
total_image = stop_image - start_image
offset = start_image
if chunk > total_image:
    chunk = total_image
num_iter = total_image // chunk
num_rest = total_image - num_iter * chunk

# Open hdf stream for saving data.
output_hdf = losa.open_hdf_stream(output_base + "/" + ofile_name,
                                  (total_image, height, width),
                                  key_path="entry/data", data_type="float32")
for i in range(num_iter):
    start_proj = i * chunk + offset
    stop_proj = start_proj + chunk
    # Load projections
    proj_chunk = data[start_proj:stop_proj]
    # Apply MTF deconvolution. Use "j" as the inner index to avoid shadowing
    # the chunk index "i".
    proj_chunk = np.asarray(Parallel(n_jobs=ncore, backend="threading")(
        delayed(corr.mtf_deconvolution)(proj_chunk[j], mtf_win, mtf_pad)
        for j in range(chunk)))
    # Apply distortion correction
    proj_chunk = np.asarray(Parallel(n_jobs=ncore, backend="threading")(
        delayed(corr.unwarp_projection)(proj_chunk[j], xcenter, ycenter,
                                        list_fact) for j in range(chunk)))
    # Apply flat-field correction
    proj_corr = corr.flat_field_correction(
        proj_chunk[:, top:bot, left:right], flat_field[top:bot, left:right],
        dark_field[top:bot, left:right])
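    # A plausible continuation (a sketch, not the original script): write the
    # corrected chunk into the opened hdf stream at the matching offset.
    output_hdf[start_proj - offset:stop_proj - offset] = proj_corr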