Example #1
def extract_tif_from_hdf(input_path, output_path, key_path, index=(0, -1, 1),
                         axis=0, crop=(0, 0, 0, 0), prefix="img"):
    """
    Extract tif images from a hdf/nxs file.

    Parameters
    ----------
    input_path : str
        Path to the hdf/nxs file.
    output_path : str
        Output folder.
    key_path : str
        Key path to the dataset in the hdf/nxs file.
    index : int or tuple of int
        Indices of the extracted images. A tuple corresponds to
        (start, stop, step).
    axis : int
        Axis along which the images are extracted.
    crop : tuple of int, optional
        Crop the images from the edges, i.e.
        crop = (crop_top, crop_bottom, crop_left, crop_right).
    prefix : str, optional
        Prefix of names of tif files.

    Returns
    -------
    str
        Folder path to the tif files.
    """
    data = losa.load_hdf(input_path, key_path)
    (depth, height, width) = data.shape
    if isinstance(index, tuple):
        start, stop, step = index
    else:
        start, stop, step = index, index + 1, 1
    cr_top, cr_bottom, cr_left, cr_right = crop
    if axis == 1:
        if (stop == -1) or (stop > height):
            stop = height
        for i in range(start, stop, step):
            mat = data[cr_top:depth - cr_bottom, i, cr_left:width - cr_right]
            out_name = "0000" + str(i)
            losa.save_image(
                output_path + "/" + prefix + "_" + out_name[-5:] + ".tif", mat)
    elif axis == 2:
        if (stop == -1) or (stop > width):
            stop = width
        for i in range(start, stop, step):
            mat = data[cr_top:depth - cr_bottom, cr_left:height - cr_right, i]
            out_name = "0000" + str(i)
            losa.save_image(
                output_path + "/" + prefix + "_" + out_name[-5:] + ".tif", mat)
    else:
        if (stop == -1) or (stop > depth):
            stop = depth
        for i in range(start, stop, step):
            mat = data[i, cr_top:height - cr_bottom, cr_left:width - cr_right]
            out_name = "0000" + str(i)
            losa.save_image(
                output_path + "/" + prefix + "_" + out_name[-5:] + ".tif", mat)
    return output_path
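# A minimal usage sketch (hypothetical paths): extract every 10th image
# along axis 0. The module path algotom.io.converter is assumed from the
# built-in call conv.extract_tif_from_hdf shown later on this page.
import algotom.io.converter as conv

out_folder = conv.extract_tif_from_hdf("D:/data/scan.hdf", "D:/output/tifs/",
                                       "entry/data/data", index=(0, -1, 10),
                                       axis=0, prefix="proj")
print("Tif files are saved in: {}".format(out_folder))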
Example #2
import unittest
import h5py
import numpy as np
import algotom.io.loadersaver as losa

class LoaderSaverTest(unittest.TestCase):  # Hypothetical wrapper class.
    def test_load_hdf(self):
        file_path = "data/data.hdf"
        ifile = h5py.File(file_path, "w")
        ifile.create_dataset("entry/data", data=np.random.rand(64, 64))
        ifile.close()
        data = losa.load_hdf(file_path, "entry/data")[:]
        self.assertTrue(isinstance(data, np.ndarray))
Example #3
import numpy as np
import algotom.io.loadersaver as losa
import algotom.prep.calculation as calc
import algotom.prep.filtering as filt
import algotom.rec.reconstruction as reco

# Paths to data
proj_path = "D:/data/scan_00010/projections_00000.hdf"
flat_path = "D:/data/scan_00009/flats_00000.hdf"
dark_path = "D:/data/scan_00009/darks_00000.hdf"
meta_path = "D:/data/scan_00010/scan_00010.nxs"
key_path = "/entry/data/data"
pixel_size = 3.24297964149*10**(-3) # mm. Calibrated at the beamtime.

# Where to save the outputs
output_base = "D:/output/"

# Load data of projection images as an hdf object
proj_data = losa.load_hdf(proj_path, key_path)
(depth, height, width) = proj_data.shape
# Load flat-field images and dark-field images, average each of them
flat_field = np.mean(losa.load_hdf(flat_path, key_path)[:], axis=0)
dark_field = np.mean(losa.load_hdf(dark_path, key_path)[:], axis=0)
# Load metadata of the helical scan
pitch = np.float32(losa.load_hdf(meta_path, "/entry1/information/pitch"))  # mm
angles = np.float32(losa.load_hdf(meta_path, "/entry1/tomo_entry/instrument/detector/rotation_angle"))  # degrees
num_proj = np.int16(losa.load_hdf(meta_path, "/entry1/information/number_projections"))
y_start = np.float32(losa.load_hdf(meta_path, "/entry1/information/y_start"))
y_stop = np.float32(losa.load_hdf(meta_path, "/entry1/information/y_stop"))

scan_type = "360"
(y_s, y_e) = calc.calculate_reconstructable_height(y_start, y_stop, pitch, scan_type)
max_index = calc.calculate_maximum_index(y_start, y_stop, pitch, pixel_size, scan_type)
print("1 -> Given y_start: {0}, y_stop: {1}, pitch: {2}, and scan_type: '{3}'".format(y_start, y_stop, pitch, scan_type))
Example #4
import numpy as np
import algotom.io.loadersaver as losa

# Where to load the reconstructed slices
input_base = "D:/Full_reconstruction/"

# Where to save the outputs
output_base = "D:/Dsp_grid_scan/"
output_file = "full_size_dsp_8_8_8.hdf"
cube = (8, 8, 8)  # Downsampling factor

list_file = losa.find_file(input_base + "*.hdf")
key_path = "entry/data"

list_hdf_object = []
num_file = len(list_file)
list_nslice = []
for i in range(num_file):
    hdf_object = losa.load_hdf(list_file[i], key_path)
    list_hdf_object.append(hdf_object)
    (nslice, height, width) = hdf_object.shape
    list_nslice.append(nslice)
total_slice = np.sum(np.asarray(list_nslice))

total_slice_r = (total_slice // cube[0]) * cube[0]
height_r = (height // cube[1]) * cube[1]
width_r = (width // cube[2]) * cube[2]

# Calculate the size of downsampled data.
dsp_slice = total_slice_r // cube[0]
dsp_height = height_r // cube[1]
dsp_width = width_r // cube[2]

next_slice = 0
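# A hedged sketch of how the loop might continue from here, grouping
# cube-sized blocks and streaming the averaged result to the output file
# via losa.open_hdf_stream (used the same way in Example #6). Chunks
# crossing file boundaries are skipped for simplicity.
data_out = losa.open_hdf_stream(output_base + "/" + output_file,
                                (dsp_slice, dsp_height, dsp_width),
                                key_path=key_path)
for hdf_object in list_hdf_object:
    nslice = hdf_object.shape[0]
    for i in range(0, (nslice // cube[0]) * cube[0], cube[0]):
        if next_slice >= dsp_slice:
            break
        # Load a chunk of slices, crop to a multiple of the cell size,
        # and average over each cube[0] x cube[1] x cube[2] cell.
        mat = np.asarray(hdf_object[i:i + cube[0], :height_r, :width_r])
        mat = mat.reshape(cube[0], dsp_height, cube[1], dsp_width, cube[2])
        data_out[next_slice] = np.mean(np.mean(np.mean(mat, axis=-1),
                                               axis=0), axis=1)
        next_slice += 1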
Example #5
def get_statical_information_dataset(input_,
                                     percentile=(5, 95),
                                     skip=5,
                                     denoise=False,
                                     key_path=None):
    """
    Get statical information of a dataset. This can be a folder of tif files,
    a hdf file, or a 3D array.

    Parameters
    ----------
    input_ : str, hdf file, or array_like
        It can be a folder path to tif files, a hdf file, or a 3D array.
    percentile : tuple of floats
        Tuple of (min_percentile, max_percentile) to compute.
        Must be between 0 and 100 inclusive.
    skip : int
        Step size for skipping slices while reading the input.
    denoise : bool, optional
        Enable/disable denoising before extracting statistical information.
    key_path : str, optional
        Key path to the dataset if the input is the hdf file.

    Returns
    -------
    gmin : float
        The global minimum value of the data array.
    gmax : float
        The global maximum value of the data array.
    min_percent : float
        The global minimum of the first computed percentile of the data array.
    max_percent : float
        The global maximum of the last computed percentile of the data array.
    mean : float
        The mean of the data array.
    median : float
        The median of the data array.
    variance : float
        The mean of the variance of the data array.
    """
    if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""):
        list_file = losa.find_file(input_ + "/*.tif*")
        depth = len(list_file)
        if depth == 0:
            raise ValueError("No tif files in the folder: {}".format(input_))
        list_stat = []
        for i in range(0, depth, skip):
            mat = losa.load_image(list_file[i])
            if denoise is True:
                mat = gaussian_filter(mat, 2)
            list_stat.append(get_statical_information(mat, percentile,
                                                      denoise))
    else:
        if isinstance(input_, str):
            file_ext = os.path.splitext(input_)[-1]
            if not (file_ext == '.hdf' or file_ext == '.h5'
                    or file_ext == ".nxs"):
                raise ValueError(
                    "Can't open this type of file format {}".format(file_ext))
            if key_path is None:
                raise ValueError(
                    "Please provide the key path to the dataset!!!")
            input_ = losa.load_hdf(input_, key_path)
        depth = len(input_)
        list_stat = []
        for i in range(0, depth, skip):
            mat = input_[i]
            if denoise is True:
                mat = gaussian_filter(mat, 2)
            list_stat.append(get_statical_information(mat, percentile,
                                                      denoise))
    list_stat = np.asarray(list_stat)
    gmin = np.min(list_stat[:, 0])
    gmax = np.max(list_stat[:, 1])
    min_percent = np.min(list_stat[:, 2])
    max_percent = np.max(list_stat[:, 3])
    median = np.median(list_stat[:, 4])
    mean = np.mean(list_stat[:, 5])
    variance = np.mean(list_stat[:, 6])
    return gmin, gmax, min_percent, max_percent, mean, median, variance
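# A minimal usage sketch (hypothetical path): statistics of an hdf
# dataset, reading every 10th slice.
results = get_statical_information_dataset("D:/data/recon.hdf", skip=10,
                                           key_path="entry/data")
(gmin, gmax, min_percent, max_percent, mean, median, variance) = results
print("Global min: {0}, global max: {1}".format(gmin, gmax))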
Example #6
def rescale_dataset(input_,
                    output,
                    nbit=16,
                    minmax=None,
                    skip=None,
                    key_path=None):
    """
    Rescale a dataset to 8-bit or 16-bit data-type. The dataset can be a
    folder of tif files, a hdf file, or a 3D array.

    Parameters
    ----------
    input_ : str, array_like
        It can be a folder path to tif files, a hdf file, or 3D array.
    output : str, None
        It can be a folder path, a hdf file path, or None (memory consuming).
    nbit : {8,16}
        Rescaled data-type: 8-bit or 16-bit.
    minmax : tuple of float, or None
        Minimum and maximum values used for rescaling. They are calculated if
        None is given.
    skip : int or None
        Step size for skipping slices while computing the statistical
        information used for rescaling.
    key_path : str, optional
        Key path to the dataset if the input is the hdf file.

    Returns
    -------
    array_like or None
        If output is None, a rescaled 3D array is returned.
    """
    if output is not None:
        file_base, file_ext = os.path.splitext(output)
        if file_ext != "":
            file_base = os.path.dirname(output)
        if os.path.exists(file_base):
            raise ValueError("Folder exists!!! Please choose another path!!!")
    if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""):
        list_file = losa.find_file(input_ + "/*.tif*")
        depth = len(list_file)
        if depth == 0:
            raise ValueError("No tif files in the folder: {}".format(input_))
        if minmax is None:
            if skip is None:
                skip = int(np.ceil(0.15 * depth))
            (gmin, gmax) = get_statical_information_dataset(input_,
                                                            skip=skip)[0:2]
        else:
            (gmin, gmax) = minmax
        if output is not None:
            file_base, file_ext = os.path.splitext(output)
            if file_ext != "":
                if not (file_ext == '.hdf' or file_ext == '.h5'
                        or file_ext == ".nxs"):
                    raise ValueError("File extension must be hdf, h5, or nxs")
                output = file_base + file_ext
                (height, width) = np.shape(losa.load_image(list_file[0]))
                if nbit == 8:
                    data_type = "uint8"
                else:
                    data_type = "uint16"
                data_out = losa.open_hdf_stream(output, (depth, height, width),
                                                key_path="rescale/data",
                                                data_type=data_type,
                                                overwrite=False)
        data_res = []
        for i in range(0, depth):
            mat = rescale(losa.load_image(list_file[i]),
                          nbit=nbit,
                          minmax=(gmin, gmax))
            if output is None:
                data_res.append(mat)
            else:
                file_base, file_ext = os.path.splitext(output)
                if file_ext == "":
                    out_name = "0000" + str(i)
                    losa.save_image(output + "/img_" + out_name[-5:] + ".tif",
                                    mat)
                else:
                    data_out[i] = mat
    else:
        if isinstance(input_, str):
            file_ext = os.path.splitext(input_)[-1]
            if not (file_ext == '.hdf' or file_ext == '.h5'
                    or file_ext == ".nxs"):
                raise ValueError(
                    "Can't open this type of file format {}".format(file_ext))
            if key_path is None:
                raise ValueError(
                    "Please provide the key path to the dataset!!!")
            input_ = losa.load_hdf(input_, key_path)
        (depth, height, width) = input_.shape
        if minmax is None:
            if skip is None:
                skip = int(np.ceil(0.15 * depth))
            (gmin,
             gmax) = get_statical_information_dataset(input_,
                                                      skip=skip,
                                                      key_path=key_path)[0:2]
        else:
            (gmin, gmax) = minmax
        data_res = []
        if output is not None:
            file_base, file_ext = os.path.splitext(output)
            if file_ext != "":
                if not (file_ext == '.hdf' or file_ext == '.h5'
                        or file_ext == ".nxs"):
                    raise ValueError("File extension must be hdf, h5, or nxs")
                output = file_base + file_ext
                if nbit == 8:
                    data_type = "uint8"
                else:
                    data_type = "uint16"
                data_out = losa.open_hdf_stream(output, (depth, height, width),
                                                key_path="rescale/data",
                                                data_type=data_type,
                                                overwrite=False)
        for i in range(0, depth):
            mat = rescale(input_[i], nbit=nbit, minmax=(gmin, gmax))
            if output is None:
                data_res.append(mat)
            else:
                file_base, file_ext = os.path.splitext(output)
                if file_ext != "":
                    data_out[i] = mat
                else:
                    out_name = "0000" + str(i)
                    losa.save_image(output + "/img_" + out_name[-5:] + ".tif",
                                    mat)
    if output is None:
        return np.asarray(data_res)
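# A minimal usage sketch (hypothetical paths): rescale a folder of tif
# slices to 8-bit and stream the result to an hdf file. The global
# min/max are estimated from ~15% of the slices when minmax is None.
rescale_dataset("D:/recon/slices/", "D:/recon/rescaled_8bit.hdf", nbit=8)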
Example #7
def downsample_dataset(input_,
                       output,
                       cell_size,
                       method="mean",
                       key_path=None):
    """
    Downsample a dataset. This can be a folder of tif files, a hdf file,
    or a 3D array.

    Parameters
    ----------
    input_ : str, array_like
        It can be a folder path to tif files, a hdf file, or 3D array.
    output : str, None
        It can be a folder path, a hdf file path, or None (memory consuming).
    cell_size : int or tuple of int
        Window size along axes used for grouping pixels.
    method : {"mean", "median", "max", "min"}
        Downsampling method.
    key_path : str, optional
        Key path to the dataset if the input is the hdf file.

    Returns
    -------
    array_like or None
        If output is None, a downsampled 3D array is returned.
    """
    if output is not None:
        file_base, file_ext = os.path.splitext(output)
        if file_ext != "":
            file_base = os.path.dirname(output)
        if os.path.exists(file_base):
            raise ValueError("Folder exists!!! Please choose another path!!!")
    if method == "median":
        dsp_method = np.median
    elif method == "max":
        dsp_method = np.max
    elif method == "min":
        dsp_method = np.amin
    else:
        dsp_method = np.mean
    if isinstance(cell_size, int):
        cell_size = (cell_size, cell_size, cell_size)
    if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""):
        list_file = losa.find_file(input_ + "/*.tif*")
        depth = len(list_file)
        if depth == 0:
            raise ValueError("No tif files in the folder: {}".format(input_))
        (height, width) = np.shape(losa.load_image(list_file[0]))
        depth_dsp = depth // cell_size[0]
        height_dsp = height // cell_size[1]
        width_dsp = width // cell_size[2]
        num = 0
        if (depth_dsp != 0) and (height_dsp != 0) and (width_dsp != 0):
            if output is not None:
                file_base, file_ext = os.path.splitext(output)
                if file_ext != "":
                    if not (file_ext == '.hdf' or file_ext == '.h5'
                            or file_ext == ".nxs"):
                        raise ValueError(
                            "File extension must be hdf, h5, or nxs")
                output = file_base + file_ext
                data_out = losa.open_hdf_stream(
                    output, (depth_dsp, height_dsp, width_dsp),
                    key_path="downsample/data",
                    overwrite=False)
            data_dsp = []
            for i in range(0, depth, cell_size[0]):
                if (i + cell_size[0]) > depth:
                    break
                else:
                    mat = []
                    for j in range(i, i + cell_size[0]):
                        mat.append(losa.load_image(list_file[j]))
                    mat = np.asarray(mat)
                    mat = mat[:, :height_dsp * cell_size[1], :width_dsp *
                              cell_size[2]]
                    mat = mat.reshape(1, cell_size[0], height_dsp,
                                      cell_size[1], width_dsp, cell_size[2])
                    mat_dsp = dsp_method(dsp_method(dsp_method(mat, axis=-1),
                                                    axis=1),
                                         axis=2)
                    if output is None:
                        data_dsp.append(mat_dsp[0])
                    else:
                        if file_ext == "":
                            out_name = "0000" + str(num)
                            losa.save_image(
                                output + "/img_" + out_name[-5:] + ".tif",
                                mat_dsp[0])
                        else:
                            data_out[num] = mat_dsp[0]
                        num += 1
        else:
            raise ValueError("Incorrect cell size {}".format(cell_size))
    else:
        if isinstance(input_, str):
            file_ext = os.path.splitext(input_)[-1]
            if not (file_ext == '.hdf' or file_ext == '.h5'
                    or file_ext == ".nxs"):
                raise ValueError(
                    "Can't open this type of file format {}".format(file_ext))
            if key_path is None:
                raise ValueError(
                    "Please provide the key path to the dataset!!!")
            input_ = losa.load_hdf(input_, key_path)
        (depth, height, width) = input_.shape
        depth_dsp = depth // cell_size[0]
        height_dsp = height // cell_size[1]
        width_dsp = width // cell_size[2]
        if (depth_dsp != 0) and (height_dsp != 0) and (width_dsp != 0):
            if output is None:
                input_ = input_[:depth_dsp * cell_size[0], :height_dsp *
                                cell_size[1], :width_dsp * cell_size[2]]
                input_ = input_.reshape(depth_dsp, cell_size[0], height_dsp,
                                        cell_size[1], width_dsp, cell_size[2])
                data_dsp = dsp_method(dsp_method(dsp_method(input_, axis=-1),
                                                 axis=1),
                                      axis=2)
            else:
                file_base, file_ext = os.path.splitext(output)
                if file_ext != "":
                    if not (file_ext == '.hdf' or file_ext == '.h5'
                            or file_ext == ".nxs"):
                        raise ValueError(
                            "File extension must be hdf, h5, or nxs")
                    output = file_base + file_ext
                    data_out = losa.open_hdf_stream(
                        output, (depth_dsp, height_dsp, width_dsp),
                        key_path="downsample/data",
                        overwrite=False)
                num = 0
                for i in range(0, depth, cell_size[0]):
                    if (i + cell_size[0]) > depth:
                        break
                    else:
                        mat = input_[i:i + cell_size[0], :height_dsp *
                                     cell_size[1], :width_dsp * cell_size[2]]
                        mat = mat.reshape(1, cell_size[0], height_dsp,
                                          cell_size[1], width_dsp,
                                          cell_size[2])
                        mat_dsp = dsp_method(dsp_method(dsp_method(mat,
                                                                   axis=-1),
                                                        axis=1),
                                             axis=2)
                        if file_ext != "":
                            data_out[num] = mat_dsp[0]
                        else:
                            out_name = "0000" + str(num)
                            losa.save_image(
                                output + "/img_" + out_name[-5:] + ".tif",
                                mat_dsp[0])
                        num += 1
        else:
            raise ValueError("Incorrect cell size {}".format(cell_size))
    if output is None:
        return np.asarray(data_dsp)
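# A minimal usage sketch (hypothetical paths): downsample a volume by 2
# along each axis using the median of each 2 x 2 x 2 cell.
downsample_dataset("D:/recon/full/", "D:/recon/dsp_2x2x2.hdf", (2, 2, 2),
                   method="median")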
Example #8
import numpy as np
import algotom.io.loadersaver as losa
import algotom.prep.correction as corr
import algotom.prep.calculation as calc
import algotom.prep.conversion as conv
import algotom.prep.filtering as filt
import algotom.rec.reconstruction as reco
import timeit

proj_path = "D:/data/scan_00008/projections_00000.hdf"
flat_path = "D:/data/scan_00009/flats_00000.hdf"
dark_path = "D:/data/scan_00009/darks_00000.hdf"
meta_path = "D:/data/scan_00008/scan_00008.nxs"
key_path = "/entry/data/data"
angle_key = "/entry1/tomo_entry/data/rotation_angle"

output_base = "D:/output/"

data = losa.load_hdf(proj_path, key_path)  # This is an hdf object, not an ndarray.
(depth, height, width) = data.shape
angles = np.squeeze(np.asarray(losa.load_hdf(meta_path, angle_key)[:]))
# Load dark-field images and flat-field images, averaging each result.
flat_field = np.mean(losa.load_hdf(flat_path, key_path)[:], axis=0)
dark_field = np.mean(losa.load_hdf(dark_path, key_path)[:], axis=0)
# Generate a sinogram and perform reconstruction.
index = height // 2
print("1 -> Extract a sinogram with flat-field correction")
sino_360 = corr.flat_field_correction(data[:, index, :], flat_field[index],
                                      dark_field[index])
t0 = timeit.default_timer()
print(
    "2 -> Calculate the center-of-rotation, the overlap-side and overlap-area used for stitching"
)
(center0, overlap, side, _) = calc.find_center_360(sino_360, 100)
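# A hedged continuation sketch. It assumes conv.convert_sinogram_360_to_180
# returns the stitched 180-degree sinogram together with its updated
# center of rotation.
(sino_180, center1) = conv.convert_sinogram_360_to_180(sino_360, center0)
sino_180 = filt.fresnel_filter(sino_180, 100)  # Optional low-pass filter.
img_rec = reco.dfi_reconstruction(sino_180, center1)
losa.save_image(output_base + "/rec_slice.tif", img_rec)
print("Time cost: {}".format(timeit.default_timer() - t0))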
Example #9
df_name = "scan_" + prefix[-5:]
proj_name = []
for i in proj_scan:
    prefix = "0000" + str(i)
    proj_name.append("scan_" + prefix[-5:])

# Separate scans into 8 rows x 3 columns
num_scan_total = len(proj_scan)
num_scan_col = 3
num_scan_row = num_scan_total // num_scan_col
scan_grid = np.reshape(proj_scan, (num_scan_row, num_scan_col))
overlap_window = 100  # Used to calculate the overlap-area and overlap-side.

# Load dark-field and flat-field images, average each of them.
flat_path = losa.find_file(input_base + "/" + df_name + "/*flat*")[0]
flat_field = np.mean(losa.load_hdf(flat_path, key_path)[:], axis=0)
dark_path = losa.find_file(input_base + "/" + df_name + "/*dark*")[0]
dark_field = np.mean(losa.load_hdf(dark_path, key_path)[:], axis=0)
# Load projection images of each scan as hdf objects
data_objects = []
list_depth = []
list_height = []
list_width = []
for i in range(num_scan_total):
    file_path = losa.find_file(input_base + "/" + proj_name[i] + "/*proj*")[0]
    hdf_object = losa.load_hdf(file_path, key_path)
    (depth1, height1, width1) = hdf_object.shape
    list_depth.append(depth1)
    list_height.append(height1)
    list_width.append(width1)
    data_objects.append(hdf_object)
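# A hedged continuation sketch: check that all scans in the grid share
# the same frame size before calculating overlaps and stitching.
if len(set(list_height)) > 1 or len(set(list_width)) > 1:
    raise ValueError("Projection images of the scans have different sizes!")
print("Scan grid: {0} x {1}, frame size: {2} x {3}".format(
    num_scan_row, num_scan_col, list_height[0], list_width[0]))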
Example #10
"""
The following example shows how to retrieve phase image from two stacks of
speckle-images and sample-images.

Refer to "example_01_*.py" to see how to find key-paths and datasets
in a hdf/nxs file, or use the function "get_hdf_tree" in the
loadersaver.py module.
"""

import timeit
import numpy as np
import algotom.io.loadersaver as losa
import algotom.prep.phase as ps

# If data is hdf-format
speckle_stack = losa.load_hdf("C:/user/data/ref_stack.hdf", "entry/data")
sample_stack = losa.load_hdf("C:/user/data/sam_stack.hdf", "entry/data")

# If data is tif-format:
# speckle_stack = losa.get_tif_stack("C:/user/data/ref/")
# sample_stack = losa.get_tif_stack("C:/user/data/sam/")

output_base = "C:/user/output/"

num_use = 40  # Use 40 speckle-positions for calculation
speckle_stack = speckle_stack[:num_use, :, :]  # Data shape: 40 x 2560 x 2160
sample_stack = sample_stack[:num_use, :, :]
chunk_size = 100  # Process 100 rows in one go. Adjust to suit CPU/GPU memory.

t0 = timeit.default_timer()
# dim=2 is slow (>45 mins) if running on CPU.
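# A hedged continuation sketch. The call below assumes the signature of
# ps.retrieve_phase_based_speckle_tracking from algotom's documentation;
# dim=1 is used because dim=2 is slow on CPU, as noted above.
phase = ps.retrieve_phase_based_speckle_tracking(speckle_stack, sample_stack,
                                                 dim=1, win_size=7, margin=10,
                                                 gpu=False,
                                                 chunk_size=chunk_size)
losa.save_image(output_base + "/phase.tif", phase)
print("Time cost: {}".format(timeit.default_timer() - t0))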
Example #11
gpu = True
dim = 1  # Use 1D/2D-searching for finding shifts
win_size = 7  # Size of window around each pixel
margin = 10  # Searching range for finding shifts

slice_idx = 1200  # Slice to reconstruct

print("********************************")
print("*************Start**************")
print("********************************")

list_file = losa.find_file(input_base + "/*.nxs")
# Get keys to datasets
data_key = losa.find_hdf_key(list_file[0], "data/data")[0][0]
image_key = losa.find_hdf_key(list_file[0], "image_key")[0][-1]
data_obj = losa.load_hdf(list_file[0], data_key)
(height, width) = data_obj.shape[-2:]

# Define the ROI area to retrieve phase around the given slice (row) index
crop_top = slice_idx - 2 * margin
crop_bot = height - (slice_idx + 2 * margin)
crop_left = 0
crop_right = 0
crop = (crop_top, crop_bot, crop_left, crop_right)

# Get number of projections.
num_proj = []
for file in list_file:
    int_keys = losa.load_hdf(file, image_key)[:]
    num_proj1 = len(np.squeeze(np.asarray(np.where(int_keys == 0.0))))
    num_proj.append(num_proj1)
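# A short continuation, reporting the projection counts gathered above.
for file, num in zip(list_file, num_proj):
    print("File: {0} -> number of projections: {1}".format(file, num))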
Example #12
sam_path = []
for scan in sam_num:
    sam_path.append(
        losa.find_file(input_base + "/k11-" + str(scan) + "/" +
                       "*imaging*")[0])
ref_path = []
for scan in ref_num:
    ref_path.append(
        losa.find_file(input_base + "/k11-" + str(scan) + "/" +
                       "*imaging*")[0])
dark_field_path = losa.find_file(input_base + "/k11-" + str(dark_field_num) +
                                 "/" + "*imaging*")[0]
data_key = "entry/detector/detector"
# Get the dark-field image (camera noise). Note that it's different from
# the dark-signal image.
dark_field = np.mean(losa.load_hdf(dark_field_path, data_key)[:], axis=0)

# Initial parameters
get_trans_dark_signal = True
num_use = 40  # Number of speckle positions used for phase retrieval.
gpu = True  # Use GPU for computing
chunk_size = 100  # Process 100 rows in one go. Adjust to suit CPU/GPU memory.
dim = 2  # Use 1D/2D-searching for finding shifts
win_size = 7  # Size of window around each pixel
margin = 10  # Searching range for finding shifts
align = True  # Align if there are shifts between speckle-images and sample-images.
# Note: select ROIs without samples to calculate the shifts.

print("********************************")
print("*************Start**************")
print("********************************")
import numpy as np
import algotom.io.loadersaver as losa
import algotom.io.converter as conv
import algotom.prep.correction as corr
import algotom.prep.calculation as calc
import algotom.rec.reconstruction as reco
import algotom.prep.removal as remo
import algotom.prep.filtering as filt
import algotom.util.utility as util

file_path = "E:/Tomo_data/68067.nxs"
output_base = "E:/tmp/output3/"

# Provide path to datasets in the nxs file.
data_key = "/entry1/tomo_entry/data/data"
image_key = "/entry1/tomo_entry/instrument/detector/image_key"
angle_key = "/entry1/tomo_entry/data/rotation_angle"

ikey = np.squeeze(np.asarray(losa.load_hdf(file_path, image_key)))
angles = np.squeeze(np.asarray(losa.load_hdf(file_path, angle_key)))
data = losa.load_hdf(file_path, data_key)  # This is an hdf object, not an ndarray.
(depth, height, width) = data.shape

# Load dark-field images and flat-field images, averaging each result.
print("1 -> Load dark-field and flat-field images, average each result")
dark_field = np.mean(np.asarray(data[np.squeeze(np.where(ikey==2.0)), :, :]), axis=0)
flat_field = np.mean(np.asarray(data[np.squeeze(np.where(ikey==1.0)), :, :]), axis=0)

# Perform flat-field correction in the projection space and save the result.
# Note that this data has time-stamps at the top-left of the images in
# binary gray-scale (size ~ 10 x 80), which gives rise to zero-division
# warnings. Algotom replaces zeros with the mean value or 1. We can also
# crop 10 pixels from the top to avoid this problem.
print("2 -> Save few projection images as tifs")
# Using the following commands we'll get the keys for loading the only
# necessary datasets.
list_key, list_shape, _ = losa.find_hdf_key(file_path, "data")
for i, key in enumerate(list_key):
    # There are several datasets with keys containing "data"; we only
    # choose the 3D array.
    if len(list_shape[i])==3:
        data_key = key
        break
image_key = losa.find_hdf_key(file_path, "image_key")[0][0]
# Results are:
# data_key = "/entry1/flyScanDetector/data"
# image_key = "/entry1/flyScanDetector/image_key"

# Load flat-field images, average them, and save the result as a tif image.
print("3 -> Load flat-field images, average them, and save the result: ")
data = losa.load_hdf(file_path, data_key)  # This is an hdf object; no data are loaded into memory yet.
ikey = np.asarray(losa.load_hdf(file_path, image_key))
flat_field = np.mean(np.asarray(data[np.squeeze(np.where(ikey==1.0)), :,:]), axis=0)
losa.save_image(output_base + "/flat/flat_field.tif", flat_field)

# Load a few projection images and save them as tifs.
print("4 -> Load projection images with a step of 250 and save them as tifs: ")
proj_idx = np.squeeze(np.where(ikey == 0))
for i in range(0, len(proj_idx), 250):
    mat = data[proj_idx[i], :, :]
    name = "0000" + str(proj_idx[i])
    losa.save_image(output_base + "/projection/img_" + name[-5:] + ".tif", mat)

print("5 -> Same as 2 but using a built-in function: ")
# The above example can be done using the "converter" module as follows:
conv.extract_tif_from_hdf(file_path, output_base + "/projection2/",
                          data_key, index=(0, -1, 250), axis=0)
# Note: the arguments after the output path are assumed; the original
# snippet is truncated at this point.
Example #15
import timeit
import numpy as np
import algotom.io.loadersaver as losa
import algotom.prep.calculation as calc
import algotom.prep.filtering as filt
import algotom.prep.removal as rem
import algotom.rec.reconstruction as reco

phase_path = "C:/user/processed_data/phase.hdf"
dark_path = "C:/user/processed_data/dark_signal.hdf"
trans_path = "C:/user/processed_data/transmission.hdf"

output_base = "C:/user/processed_data/reconstruction/"

# Provide hdf-keys
phase_hdf = losa.load_hdf(phase_path, "entry/data")
dark_hdf = losa.load_hdf(dark_path, "entry/data")
trans_hdf = losa.load_hdf(trans_path, "entry/data")

(num_proj, height, width) = phase_hdf.shape

start_slice = 50
stop_slice = height - 1
step = 100

# Find the center of rotation using a transmission sinogram.
sino_center = trans_hdf[:, height // 2, :]
center = calc.find_center_vo(sino_center)
print("Center of rotation {}".format(center))

fluct_correct = False  # Use a double-wedge filter to correct fluctuations
                       # in the sinogram background (assumed completion of
                       # a truncated comment).
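# A hedged sketch of how the reconstruction loop might continue from here,
# reconstructing phase slices in the chosen range and optionally applying
# the double-wedge filter toggled above.
for idx in range(start_slice, stop_slice + 1, step):
    sinogram = phase_hdf[:, idx, :]
    if fluct_correct:
        sinogram = filt.double_wedge_filter(sinogram, center)
    sinogram = rem.remove_all_stripe(sinogram)
    img_rec = reco.dfi_reconstruction(sinogram, center)
    name = ("0000" + str(idx))[-5:]
    losa.save_image(output_base + "/rec_phase_" + name + ".tif", img_rec)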
Example #16
dark_path = "/i12/data/darks.hdf"
metadata_path = "/i12/data/metadata.nxs"

scan_type = "continuous"  # stage is freely rotated
# scan_type = "swinging" # stage is restricted to rotate back-and-forward between 0 and 180 degree.

# Provide paths (keys) to datasets in the hdf/nxs files.
hdf_key = "/entry/data/data"
angle_key = "/entry1/tomo_entry/data/rotation_angle"
num_proj_key = "/entry1/information/number_projections"

# Crop images if needed.
crop_left = 0
crop_right = 0

data = losa.load_hdf(proj_path, hdf_key)  # This is an hdf-object not ndarray.
(depth, height, width) = data.shape
left = crop_left
right = width - crop_right
if (stop_slice == -1) or (stop_slice > height - 1):
    stop_slice = height - 1

# Load metadata
num_proj = int(np.asarray(losa.load_hdf(metadata_path, num_proj_key)))
num_tomo = depth // num_proj

angles = np.squeeze(np.asarray(losa.load_hdf(metadata_path, angle_key)))
# Sometimes there's a mismatch between the number of acquired projections
# and the number of angles, due to technical reasons or an early-terminated
# scan. In such cases, we have to provide calculated angles.
if len(angles) < depth:
    # Assumed completion (the original snippet is truncated here): generate
    # evenly spaced angles, one 180-degree range per tomograph.
    angles = np.tile(np.linspace(0.0, 180.0, num_proj), num_tomo)
Example #17
import numpy as np
import algotom.io.loadersaver as losa
import algotom.prep.correction as corr
import algotom.prep.removal as remo
import algotom.prep.calculation as calc
import algotom.prep.filtering as filt
import algotom.rec.reconstruction as reco

# Paths to data
proj_path = "D:/data/tomographic_projections.hdf"
flat_path = "D:/data/flats.hdf"
dark_path = "D:/data/darks.hdf"
coef_path = "D:/data/coefficients_bw.txt"
key_path = "/entry/data/data"
# Where to save the outputs
output_base = "D:/output/"
# Load data of projection images as an hdf object
proj_data = losa.load_hdf(proj_path, key_path)
(depth, height, width) = proj_data.shape
# Load flat-field images and dark-field images, average each of them
print("1 -> Load dark-field and flat-field images, average each result")
flat_field = np.mean(losa.load_hdf(flat_path, key_path)[:], axis=0)
dark_field = np.mean(losa.load_hdf(dark_path, key_path)[:], axis=0)
# Load distortion coefficients
print(
    "2 -> Load distortion coefficients, apply correction to averaged flat-field and dark-field images"
)
xcenter, ycenter, list_fact = losa.load_distortion_coefficient(coef_path)
# Apply distortion correction
flat_discor = corr.unwarp_projection(flat_field, xcenter, ycenter, list_fact)
dark_discor = corr.unwarp_projection(dark_field, xcenter, ycenter, list_fact)

# Generate a sinogram without distortion correction and perform
# reconstruction to compare later.
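# A hedged continuation sketch comparing reconstructions with and without
# distortion correction; corr.unwarp_sinogram is assumed to extract an
# unwarped sinogram directly from the hdf object.
index = height // 2
# Without distortion correction.
sino_raw = corr.flat_field_correction(proj_data[:, index, :],
                                      flat_field[index], dark_field[index])
center_raw = calc.find_center_vo(sino_raw)
rec_raw = reco.dfi_reconstruction(sino_raw, center_raw)
losa.save_image(output_base + "/rec_no_correction.tif", rec_raw)
# With distortion correction.
sino_unwarp = corr.unwarp_sinogram(proj_data, index, xcenter, ycenter,
                                   list_fact)
sino_unwarp = corr.flat_field_correction(sino_unwarp, flat_discor[index],
                                         dark_discor[index])
center = calc.find_center_vo(sino_unwarp)
rec_corr = reco.dfi_reconstruction(sino_unwarp, center)
losa.save_image(output_base + "/rec_correction.tif", rec_corr)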