Example #1
def log_values(args):
    """Log all values set in the args namespace.

    Arguments are grouped according to their section and logged alphabetically
    using the DEBUG log level, so --verbose is required.
    """
    args = args.__dict__

    log.warning('tomopy-cli status start')
    for section, name in zip(SECTIONS, NICE_NAMES):
        entries = sorted((k for k in args.keys() if k.replace('_', '-') in SECTIONS[section]))

        # print('log_values', section, name, entries)
        if entries:
            log.info(name)

            for entry in entries:
                value = args[entry] if args[entry] is not None else "-"
                if (value == 'none'):
                    log.warning("  {:<16} {}".format(entry, value))
                elif (value is not False):
                    log.info("  {:<16} {}".format(entry, value))
                else:
                    log.warning("  {:<16} {}".format(entry, value))

    log.warning('tomopy-cli status end')
Example #2
def padding(data, rotation_axis, params):

    log.info("  *** padding")

    if (params.padding):
        log.info('  *** *** ON')
        N = data.shape[2]
        data_pad = np.zeros([data.shape[0], data.shape[1], 3 * N // 2],
                            dtype="float32")
        data_pad[:, :, N // 4:5 * N // 4] = data
        data_pad[:, :,
                 0:N // 4] = np.reshape(data[:, :, 0],
                                        [data.shape[0], data.shape[1], 1])
        data_pad[:, :,
                 5 * N // 4:] = np.reshape(data[:, :, -1],
                                           [data.shape[0], data.shape[1], 1])

        data = data_pad
        rot_center = rotation_axis + N // 4
    else:
        log.warning('  *** *** OFF')
        data = data
        rot_center = rotation_axis

    return data, rot_center
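
The same edge-replication scheme on a toy array, as a minimal standalone sketch (sizes and the axis value are made up): the detector width grows from N to 3N/2 and the rotation axis moves right by the N/4 left pad.

import numpy as np

# Toy sinogram stack: 2 angles, 1 row, N = 8 detector columns (hypothetical sizes).
N = 8
data = np.arange(2 * 1 * N, dtype="float32").reshape(2, 1, N)

data_pad = np.zeros([data.shape[0], data.shape[1], 3 * N // 2], dtype="float32")
data_pad[:, :, N // 4:5 * N // 4] = data          # original data in the middle
data_pad[:, :, :N // 4] = data[:, :, 0:1]         # replicate the left edge value
data_pad[:, :, 5 * N // 4:] = data[:, :, -1:]     # replicate the right edge value

rotation_axis = 3.5
rot_center = rotation_axis + N // 4               # axis shifts by the left pad width
print(data_pad.shape, rot_center)                 # (2, 1, 12) 5.5
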
Example #3
def auto_read_dxchange(params):
    log.info('  *** Auto parameter reading from DXchange file.')
    params = read_pixel_size(params)
    params = read_scintillator(params)
    params = read_rot_center(params)
    log.info('  *** *** Done')
    return params
Example #4
def padding(data, rotation_axis, params):

    log.info("  *** padding")
    if ((params.reconstruction_algorithm == 'gridrec'
         and params.gridrec_padding)
            or (params.reconstruction_algorithm == 'lprec_fbp'
                and params.lprec_fbp_padding)):
        #if(params.padding):
        log.info('  *** *** ON')
        N = data.shape[2]
        data_pad = np.zeros([data.shape[0], data.shape[1], 3 * N // 2],
                            dtype="float32")
        data_pad[:, :, N // 4:5 * N // 4] = data
        data_pad[:, :,
                 0:N // 4] = np.reshape(data[:, :, 0],
                                        [data.shape[0], data.shape[1], 1])
        data_pad[:, :,
                 5 * N // 4:] = np.reshape(data[:, :, -1],
                                           [data.shape[0], data.shape[1], 1])

        data = data_pad
        rot_center = rotation_axis + N // 4
    else:
        log.warning('  *** *** OFF')
        data = data
        rot_center = rotation_axis

    return data, rot_center
Example #5
def reconstruct(data, theta, rot_center, params):

    if (params.reconstruction_type == "try"):
        sinogram_order = True
    else:
        sinogram_order = False

    log.info("  *** algorithm: %s" % params.reconstruction_algorithm)
    if params.reconstruction_algorithm == 'astrasirt':
        extra_options = {'MinConstraint': 0}
        options = {
            'proj_type': 'cuda',
            'method': 'SIRT_CUDA',
            'num_iter': 200,
            'extra_options': extra_options
        }
        shift = (int((data.shape[2] / 2 - rot_center) + .5))
        data = np.roll(data, shift, axis=2)
        rec = tomopy.recon(data,
                           theta,
                           algorithm=tomopy.astra,
                           options=options)
    elif params.reconstruction_algorithm == 'astracgls':
        extra_options = {'MinConstraint': 0}
        options = {
            'proj_type': 'cuda',
            'method': 'CGLS_CUDA',
            'num_iter': 15,
            'extra_options': extra_options
        }
        shift = (int((data.shape[2] / 2 - rot_center) + .5))
        data = np.roll(data, shift, axis=2)
        rec = tomopy.recon(data,
                           theta,
                           algorithm=tomopy.astra,
                           options=options)
    elif params.reconstruction_algorithm == 'gridrec':
        log.warning("  *** *** sinogram_order: %s" % sinogram_order)
        rec = tomopy.recon(data,
                           theta,
                           center=rot_center,
                           sinogram_order=sinogram_order,
                           algorithm=params.reconstruction_algorithm,
                           filter_name=params.filter)
    else:
        log.warning("  *** *** algorithm: %s is not supported yet" %
                    params.reconstruction_algorithm)
        params.reconstruction_algorithm = 'gridrec'
        log.warning("  *** *** using: %s instead" %
                    params.reconstruction_algorithm)
        log.warning("  *** *** sinogram_order: %s" % sinogram_order)
        rec = tomopy.recon(data,
                           theta,
                           center=rot_center,
                           sinogram_order=sinogram_order,
                           algorithm=params.reconstruction_algorithm,
                           filter_name=params.filter)

    return rec
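
A small, hypothetical illustration of the np.roll centering used in the astrasirt/astracgls branches: the sinogram columns are shifted so the rotation axis lands near the array midpoint before reconstruction.

import numpy as np

width = 10
rot_center = 3.0
row = np.arange(width, dtype="float32").reshape(1, 1, width)

shift = int((width / 2 - rot_center) + .5)   # here +2 columns toward the middle
rolled = np.roll(row, shift, axis=2)
print(shift, rolled[0, 0])                   # 2 [8. 9. 0. 1. 2. 3. 4. 5. 6. 7.]
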
Example #6
def minus_log(data, params):

    log.info("  *** minus log")
    if (params.minus_log):
        log.info('  *** *** ON')
        data = tomopy.minus_log(data)
    else:
        log.warning('  *** *** OFF')

    return data
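
For reference, the minus-log step is the Beer-Lambert linearization of flat-field normalized intensities; a numpy-only sketch with made-up values:

import numpy as np

normalized = np.array([[[1.0, 0.5, 0.1]]], dtype="float32")   # I / I0 after flat-field correction
attenuation = -np.log(normalized)                              # what the minus-log step computes
print(attenuation[0, 0])                                       # approximately [0. 0.693 2.303]
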
Example #7
def unpadding(rec, N, params):

    log.info("  *** un-padding")
    if (params.padding):
        log.info('  *** *** ON')
        rec = rec[:, N // 4:5 * N // 4, N // 4:5 * N // 4]
    else:
        log.warning('  *** *** OFF')
        rec = rec
    return rec
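
A hypothetical round-trip check for the crop above: reconstructions of padded sinograms (width 3N/2) are cut back to the original N x N field of view.

import numpy as np

N = 8
rec = np.zeros((1, 3 * N // 2, 3 * N // 2), dtype="float32")   # slices reconstructed from padded data
rec = rec[:, N // 4:5 * N // 4, N // 4:5 * N // 4]              # keep the central N x N region
print(rec.shape)                                                # (1, 8, 8)
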
Example #8
def binning(proj, flat, dark, params):

    log.info("  *** binning")
    if (params.binning == 0):
        log.info('  *** *** OFF')
    else:
        log.warning('  *** *** ON')
        log.warning('  *** *** binning: %d' % params.binning)
        proj = _binning(proj, params)
        flat = _binning(flat, params)
        dark = _binning(dark, params)

    return proj, flat, dark
Example #9
def unpadding(rec, N, params):

    log.info("  *** un-padding")
    if ((params.reconstruction_algorithm == 'gridrec'
         and params.gridrec_padding)
            or (params.reconstruction_algorithm == 'lprec_fbp'
                and params.lprec_fbp_padding)):
        #if(params.padding):
        log.info('  *** *** ON')
        rec = rec[:, N // 4:5 * N // 4, N // 4:5 * N // 4]
    else:
        log.warning('  *** *** OFF')
        rec = rec
    return rec
Example #10
def _read_tomo(params, sino):

    if (str(params.file_format) in {'dx', 'aps2bm', 'aps7bm', 'aps32id'}):
        proj, flat, dark, theta = dxchange.read_aps_32id(params.file_name,
                                                         sino=sino)
        log.info("  *** %s is a valid dx file format" % params.file_name)
    # elif:
    #     # add here other dxchange loader
    #     log.info("  *** %s is a valid xxx file format" % params.file_name)

    else:
        log.error("  *** %s is not a supported file format" %
                  params.file_format)
        exit()
    return proj, flat, dark, theta
Example #11
def read_tomo(sino, params):

    if params.hdf_file_type == 'standard':
        # Read APS 32-BM raw data.
        log.info("  *** loading a stardard data set: %s" % params.hdf_file)
        proj, flat, dark, theta = dxchange.read_aps_32id(params.hdf_file,
                                                         sino=sino)
    elif params.hdf_file_type == 'flip_and_stich':
        log.info("   *** loading a 360 deg flipped data set: %s" %
                 params.hdf_file)
        proj360, flat360, dark360, theta360 = dxchange.read_aps_32id(
            params.hdf_file, sino=sino)
        proj, flat, dark = flip_and_stitch(variableDict, proj360, flat360,
                                           dark360)
        theta = theta360[:len(theta360) // 2]  # take first half
    else:  # params.hdf_file_type == 'mosaic':
        log.error("   *** loading a mosaic data set is not supported yet")
        exit()

    if params.reverse:
        log.info("  *** correcting for 180-0 data collection")
        step_size = (theta[1] - theta[0])
        theta_size = dxreader.read_dx_dims(params.hdf_file, 'data')[0]
        theta = np.linspace(np.pi, (0 + step_size), theta_size)

    if params.blocked_views:
        log.info("  *** correcting for blocked view data collection")
        miss_angles = [params.missing_angles_start, params.missing_angle_end]

        # Manage the missing angles:
        proj = np.concatenate(
            (proj[0:miss_angles[0], :, :], proj[miss_angles[1] + 1:-1, :, :]),
            axis=0)
        theta = np.concatenate(
            (theta[0:miss_angles[0]], theta[miss_angles[1] + 1:-1]))

    # new missing projection handling
    # if params.blocked_views:
    #     log.warning("  *** new missing angle handling")
    #     miss_angles = [params.missing_angles_start, params.missing_angle_end]
    #     data = patch_projection(data, miss_angles)

    proj, flat, dark = binning(proj, flat, dark, params)

    rotation_axis = params.rotation_axis / np.power(2, float(params.binning))
    log.info("  *** rotation center: %f" % rotation_axis)

    return proj, flat, dark, theta, rotation_axis
Example #12
def blocked_view(proj, theta, params):
    log.info("  *** correcting for blocked view data collection")
    if params.blocked_views:
        log.warning('  *** *** ON')
        miss_angles = [params.missing_angles_start, params.missing_angles_end]

        # Manage the missing angles:
        proj = np.concatenate(
            (proj[0:miss_angles[0], :, :], proj[miss_angles[1] + 1:-1, :, :]),
            axis=0)
        theta = np.concatenate(
            (theta[0:miss_angles[0]], theta[miss_angles[1] + 1:-1]))
    else:
        log.warning('  *** *** OFF')

    return proj, theta
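
A standalone toy version (made-up sizes and angle indices) of the concatenation that drops a blocked angular range; note that the trailing -1 index also drops the last projection, matching the slices above.

import numpy as np

proj = np.zeros((10, 1, 4), dtype="float32")            # 10 projections
theta = np.linspace(0, np.pi, 10, endpoint=False)
miss_angles = [3, 6]                                     # assumed blocked range (inclusive indices)

proj = np.concatenate((proj[0:miss_angles[0]], proj[miss_angles[1] + 1:-1]), axis=0)
theta = np.concatenate((theta[0:miss_angles[0]], theta[miss_angles[1] + 1:-1]))
print(proj.shape, theta.shape)                           # (5, 1, 4) (5,)
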
Example #13
def mask(data, params):

    log.info("  *** mask")
    if (params.reconstruction_mask):
        log.info('  *** *** ON')
        if 0 < params.reconstruction_mask_ratio <= 1:
            log.warning("  *** mask ratio: %f " %
                        params.reconstruction_mask_ratio)
            data = tomopy.circ_mask(data,
                                    axis=0,
                                    ratio=params.reconstruction_mask_ratio)
        else:
            log.error("  *** mask ratio must be between 0-1: %f is ignored" %
                      params.reconstruction_mask_ratio)
    else:
        log.warning('  *** *** OFF')
    return data
Example #14
def find_rotation_axis(params):

    fname = params.file_name
    ra_fname = params.rotation_axis_file

    if os.path.isfile(fname):
        return _find_rotation_axis(params)

    elif os.path.isdir(fname):
        # Add a trailing slash if missing
        top = os.path.join(fname, '')

        # log.info(os.listdir(top))
        h5_file_list = list(
            filter(lambda x: x.endswith(('.h5', '.hdf')), os.listdir(top)))
        h5_file_list.sort()

        log.info("Found: %s" % h5_file_list)
        log.info("Determining the rotation axis location")

        dic_centers = {}
        i = 0
        for fname in h5_file_list:
            h5fname = top + fname
            params.file_name = h5fname
            rot_center = _find_rotation_axis(params)
            params.file_name = top
            case = {fname: rot_center}
            log.info("  *** file: %s; rotation axis %f" % (fname, rot_center))
            dic_centers[i] = case
            i += 1

        # Set the json file name that will store the rotation axis positions.
        jfname = top + ra_fname
        # Save json file containing the rotation axis
        json_dump = json.dumps(dic_centers)
        with open(jfname, "w") as f:
            f.write(json_dump)
        log.info("Rotation axis locations save in: %s" % jfname)

    else:
        log.info("Directory or File Name does not exist: %s " % fname)
Example #15
def initialize(params):
    '''Initializes the beam hardening correction code.
    '''
    log.info('  *** beam hardening')
    if params.beam_hardening_method != 'standard':
        log.info('   *** *** OFF')
    fread_config_file()
    global spectra_dict
    spectra_dict = fread_source_data()
    parse_params(params)
    center_row = find_center_row(params)
    log.info(
        "  *** *** Center row for beam hardening = {0:f}".format(center_row))
    if int(params.binning) > 0:
        center_row /= pow(2, int(params.binning))
        log.info(
            "  *** *** Center row after binning = {:f}".format(center_row))
    params.center_row = center_row
    log.info('  *** *** beam hardening initialization finished')
Example #16
def fread_source_data():
    '''Reads the spectral power data from files.
    Data file comes from the BM spectrum module in XOP.
    Return:
    Dictionary of spectra at the various psi angles from the ring plane.
    '''
    spectra_dict = {}
    file_list = list(
        filter(lambda x: x.endswith(('.dat', '.DAT')), os.listdir(data_path)))
    for f_name in file_list:
        f_path = os.path.join(data_path, f_name)
        #print(f_path)
        if os.path.isfile(f_path) and f_name.startswith('Psi'):
            log.info('  *** *** source file {:s} located'.format(f_name))
            f_angle = float(f_name.split('_')[1][:2])
            spectral_data = np.genfromtxt(f_path, comments='!')
            spectral_energies = spectral_data[:, 0] / 1000.
            spectral_power = spectral_data[:, 1]
            spectra_dict[f_angle] = Spectrum(spectral_energies, spectral_power)
    return spectra_dict
Example #17
def read_rot_center(params):
    """
    Read the rotation center from the /process group in the DXchange file.
    Return: params with params.rotation_axis set from the file if it exists,
    otherwise from the automatic search or the config file value.
    """
    log.info('  *** *** rotation axis')
    #First, try to read from the /process/tomopy-cli parameters
    with h5py.File(params.file_name, 'r') as file_name:
        try:
            dataset = '/process' + '/tomopy-cli-' + __version__ + '/' + 'find-rotation-axis' + '/' + 'rotation-axis'
            params.rotation_axis = float(file_name[dataset][0])
            log.info(
                '  *** *** Rotation center read from HDF5 file: {0:f}'.format(
                    params.rotation_axis))
            return params
        except (KeyError, ValueError):
            log.warning('  *** *** No rotation center stored in the HDF5 file')
    #If we get here, we need to either find it automatically or from config file.
    log.warning('  *** *** No rotation axis stored in DXchange file')
    if (params.rotation_axis_auto == True):
        log.warning('  *** *** Auto axis location requested')
        log.warning('  *** *** Computing rotation axis')
        params.rotation_axis = find_center.find_rotation_axis(params)
    log.info('  *** *** using config file value of {:f}'.format(
        params.rotation_axis))
    return params
Example #18
def read_pixel_size(params):
    '''
    Read the pixel size and magnification from the DXchange file.
    Used to compute the effective pixel size.
    '''
    log.info('  *** auto pixel size reading')
    if params.pixel_size_auto != True:
        log.info('  *** *** OFF')
        return params
    pixel_size = config.param_from_dxchange(
        params.file_name, '/measurement/instrument/detector/pixel_size_x')
    mag = config.param_from_dxchange(
        params.file_name,
        '/measurement/instrument/detection_system/objective/magnification')
    #Handle case where something wasn't read right
    if not (pixel_size and mag):
        log.warning('  *** *** problem reading pixel size from DXchange')
        return params
    #What if pixel size isn't in microns, but in mm or m?
    for i in range(3):
        if pixel_size < 0.5:
            pixel_size *= 1e3
        else:
            break
    params.pixel_size = pixel_size / mag
    log.info('  *** *** effective pixel size = {:6.4e} microns'.format(
        params.pixel_size))
    return params
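
A small sketch of the unit normalization above with made-up values: the loop keeps multiplying by 1e3 while the value looks smaller than a plausible micron pixel size, then the magnification is divided out.

pixel_size = 1.4e-6        # hypothetical: value stored in metres
mag = 5.0                  # hypothetical objective magnification
for i in range(3):
    if pixel_size < 0.5:
        pixel_size *= 1e3
    else:
        break
effective = pixel_size / mag
print(pixel_size, effective)   # roughly 1.4 and 0.28 microns
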
Example #19
def _find_rotation_axis(params):

    log.info("  *** calculating automatic center")
    data_size = file_io.get_dx_dims(params)
    ssino = int(data_size[1] * params.nsino)

    # Select sinogram range to reconstruct
    sino_start = ssino
    sino_end = sino_start + pow(2, int(params.binning))

    sino = (int(sino_start), int(sino_end))

    # Read APS 32-BM raw data
    proj, flat, dark, theta, params_rotation_axis_ignored = file_io.read_tomo(
        sino, params)

    # apply all preprocessing functions
    data = prep.all(proj, flat, dark, params, sino)

    # find rotation center
    log.info("  *** find_center vo")
    rot_center = tomopy.find_center_vo(data)
    log.info("  *** automatic center: %f" % rot_center)

    return rot_center * np.power(2, float(params.binning))
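
The final multiplication undoes the binning: a center found on 2**binning-downsampled data is rescaled to full-resolution pixels (toy numbers):

binning = 2                     # data were binned by a factor of 2**binning = 4
rot_center_binned = 512.25      # hypothetical center found on the binned sinogram
rot_center_full = rot_center_binned * 2 ** binning
print(rot_center_full)          # 2049.0
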
Example #20
def beamhardening_correct(data, params, sino):
    """
    Performs beam hardening corrections.
    Inputs
    data: data normalized already for bright and dark corrections.
    params: processing parameters
    sino: row numbers for these data
    """
    log.info("  *** correct beam hardening")
    data_dtype = data.dtype
    #Correct for centerline of fan
    data = beamhardening.fcorrect_as_pathlength_centerline(data)
    #Make an array of correction factors
    beamhardening.center_row = params.center_row
    log.info("  *** *** Beam hardening center row = {:f}".format(
        beamhardening.center_row))
    angles = np.abs(np.arange(sino[0], sino[1]) -
                    beamhardening.center_row).astype(data_dtype)
    angles *= beamhardening.pixel_size / beamhardening.d_source
    log.info("  *** *** angles from {0:f} to {1:f} urad".format(
        angles[0], angles[-1]))
    correction_factor = beamhardening.angular_spline(angles).astype(data_dtype)
    if len(data.shape) == 2:
        return data * correction_factor[:, None]
    else:
        return data * correction_factor[None, :, None]
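
A toy demonstration (made-up correction factors) of the broadcasting at the end: one factor per sinogram row, applied along the row axis for both 2D and 3D inputs.

import numpy as np

correction_factor = np.array([1.0, 1.1, 1.2], dtype="float32")   # one value per row
data2d = np.ones((3, 5), dtype="float32")                          # (rows, columns)
data3d = np.ones((4, 3, 5), dtype="float32")                       # (angles, rows, columns)

print((data2d * correction_factor[:, None]).shape)        # (3, 5)
print((data3d * correction_factor[None, :, None]).shape)  # (4, 3, 5)
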
Example #21
def write_hdf(config_file, args=None, sections=None):
    """
    Write the content of *config_file* into the raw data HDF file, using values from *args*
    if they are specified, otherwise the defaults. If *sections* are specified,
    write values from *args* only to those sections and use the defaults for the remaining ones.
    """
    if not args.dx_update:
        log.warning("  *** Not saving log data to the projection HDF file.")
        return
    with h5py.File(args.file_name,'r+') as hdf_file:
        #If the group we will write to already exists, remove it
        if hdf_file.get('/process/tomopy-cli-' + __version__):
            del(hdf_file['/process/tomopy-cli-' + __version__])
        #dt = h5py.string_dtype(encoding='ascii')
        log.info("  *** tomopy.conf parameter written to /process%s in file %s " % (__version__, args.file_name))
        config = configparser.ConfigParser()
        for section in SECTIONS:
            config.add_section(section)
            for name, opts in SECTIONS[section].items():
                if args and sections and section in sections and hasattr(args, name.replace('-', '_')):
                    value = getattr(args, name.replace('-', '_'))
                    if isinstance(value, list):
                        # print(type(value), value)
                        value = ', '.join(value)
                else:
                    value = opts['default'] if opts['default'] is not None else ''

                prefix = '# ' if value == '' else ''

                if name != 'config':
                    dataset = '/process' + '/tomopy-cli-' + __version__ + '/' + section + '/'+ name
                    dset_length = len(str(value)) * 2 if len(str(value)) > 5 else 10
                    dt = 'S{0:d}'.format(dset_length)
                    hdf_file.require_dataset(dataset, shape=(1,), dtype=dt)
                    log.info(name + ': ' + str(value))
                    try:
                        hdf_file[dataset][0] = np.string_(str(value))
                    except TypeError:
                        print(value)
                        raise
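
The fixed-length string sizing used for each dataset, pulled out as a tiny sketch (the parameter value is made up):

value = 'gridrec'
dset_length = len(str(value)) * 2 if len(str(value)) > 5 else 10
dt = 'S{0:d}'.format(dset_length)
print(dt)   # S14
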
Example #22
def zinger_removal(proj, flat, params):

    log.info("  *** zinger removal")
    if (params.zinger_removal_method == 'standard'):
        log.info('  *** *** ON')
        log.info("  *** *** zinger level projections: %d" %
                 params.zinger_level_projections)
        log.info("  *** *** zinger level white: %s" %
                 params.zinger_level_white)
        log.info("  *** *** zinger_size: %d" % params.zinger_size)
        proj = tomopy.misc.corr.remove_outlier(proj,
                                               params.zinger_level_projections,
                                               size=params.zinger_size,
                                               axis=0)
        flat = tomopy.misc.corr.remove_outlier(flat,
                                               params.zinger_level_white,
                                               size=params.zinger_size,
                                               axis=0)
    elif (params.zinger_removal_method == 'none'):
        log.warning('  *** *** OFF')

    return proj, flat
Example #23
def read_tomo(sino, params):
    """
    Read in the tomography data.
    Inputs:
    sino: tuple of (start_row, end_row) to be read in
    params: parameters for reconstruction
    Output:
    projection data
    flat field (bright) data
    dark field data
    theta: Numpy array of angle for each projection
    rotation_axis: location of the rotation axis
    """
    if params.file_type == 'standard':
        # Read APS 32-BM raw data.
        log.info("  *** loading a stardard data set: %s" % params.file_name)
        proj, flat, dark, theta = _read_tomo(params, sino=sino)
    elif params.file_type == 'flip_and_stich':
        log.info("   *** loading a 360 deg flipped data set: %s" %
                 params.file_name)
        proj360, flat360, dark360, theta360 = _read_tomo(params, sino=sino)
        proj, flat, dark = flip_and_stitch(variableDict, proj360, flat360,
                                           dark360)
        theta = theta360[:len(theta360) // 2]  # take first half
    else:  # params.file_type == 'mosaic':
        log.error("   *** loading a mosaic data set is not supported yet")
        exit()

    if params.reverse:
        log.info("  *** correcting for 180-0 data collection")
        step_size = (theta[1] - theta[0])
        theta_size = _read_theta_size(params)
        theta = np.linspace(np.pi, (0 + step_size), theta_size)

    proj, theta = blocked_view(proj, theta, params)

    # new missing projection handling
    # if params.blocked_views:
    #     log.warning("  *** new missing angle handling")
    #     miss_angles = [params.missing_angles_start, params.missing_angle_end]
    #     data = patch_projection(data, miss_angles)

    proj, flat, dark = binning(proj, flat, dark, params)

    rotation_axis = params.rotation_axis / np.power(2, float(params.binning))
    log.info("  *** rotation center: %f" % rotation_axis)

    return proj, flat, dark, theta, rotation_axis
Example #24
def read_scintillator(params):
    '''Read the scintillator type and thickness from DXchange.
    '''
    if params.scintillator_auto and params.beam_hardening_method.lower(
    ) == 'standard':
        log.info('  *** *** Find scintillator params from DXchange')
        params.scintillator_thickness = float(
            config.param_from_dxchange(
                params.file_name,
                '/measurement/instrument/detection_system/scintillator/scintillating_thickness',
                attr=None,
                scalar=True,
                char_array=False))
        log.info('  *** *** scintillator thickness = {:f}'.format(
            params.scintillator_thickness))
        scint_material_string = config.param_from_dxchange(
            params.file_name,
            '/measurement/instrument/detection_system/scintillator/description',
            scalar=False,
            char_array=True)
        if scint_material_string.lower().startswith('luag'):
            params.scintillator_material = 'LuAG_Ce'
        elif scint_material_string.lower().startswith('lyso'):
            params.scintillator_material = 'LYSO_Ce'
        elif scint_material_string.lower().startswith('yag'):
            params.scintillator_material = 'YAG_Ce'
        else:
            log.warning('  *** *** scintillator {:s} not recognized!'.format(
                scint_material_string))
        log.warning('  *** *** using scintillator {:s}'.format(
            params.scintillator_material))
    #Run the initialization for beam hardening.  Needed in case rotation_axis must
    #be computed later.
    if params.beam_hardening_method.lower() == 'standard':
        beamhardening.initialize(params)
    return params
Example #25
def remove_nan_neg_inf(data, params):

    log.info('  *** remove nan, neg and inf')
    if (params.fix_nan_and_inf == True):
        log.info('  *** *** ON')
        log.info('  *** *** replacement value %f ' %
                 params.fix_nan_and_inf_value)
        data = tomopy.remove_nan(data, val=params.fix_nan_and_inf_value)
        data = tomopy.remove_neg(data, val=params.fix_nan_and_inf_value)
        data[np.where(data == np.inf)] = params.fix_nan_and_inf_value
    else:
        log.warning('  *** *** OFF')

    return data
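
A numpy-only approximation of the three replacements above (tomopy.remove_nan, tomopy.remove_neg, and the explicit +inf assignment), shown on a made-up array; unlike the code above it also catches -inf:

import numpy as np

val = 0.0                                                     # replacement value
data = np.array([1.0, np.nan, -2.0, np.inf], dtype="float32")
data = np.nan_to_num(data, nan=val, posinf=val, neginf=val)   # NaN and infinities
data[data < 0] = val                                          # negative values
print(data)   # [1. 0. 0. 0.]
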
Example #26
def flat_correction(proj, flat, dark, params):

    log.info('  *** normalization')
    if (params.flat_correction_method == 'standard'):
        data = tomopy.normalize(proj,
                                flat,
                                dark,
                                cutoff=params.normalization_cutoff)
        log.info('  *** *** ON %f cut-off' % params.normalization_cutoff)
    elif (params.flat_correction_method == 'air'):
        data = tomopy.normalize_bg(proj, air=params.air)
        log.info('  *** *** air %d pixels' % params.air)
    elif (params.flat_correction_method == 'none'):
        data = proj
        log.warning('  *** *** normalization is turned off')

    return data
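
For reference, the 'standard' branch corresponds to the usual flat-field normalization (proj - dark) / (flat - dark); a numpy-only sketch with made-up frames, averaging the flats and darks and ignoring the cutoff:

import numpy as np

proj = np.full((2, 1, 4), 80.0, dtype="float32")   # projections
flat = np.full((3, 1, 4), 100.0, dtype="float32")  # bright fields
dark = np.full((3, 1, 4), 20.0, dtype="float32")   # dark fields

norm = (proj - dark.mean(axis=0)) / (flat.mean(axis=0) - dark.mean(axis=0))
print(norm[0, 0])   # [0.75 0.75 0.75 0.75]
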
Example #27
def segment(params):

    # slice/full reconstruction file location
    tail = os.sep + os.path.splitext(os.path.basename(
        params.hdf_file))[0] + '_rec' + os.sep
    top = os.path.dirname(params.hdf_file) + '_rec' + tail

    # log.info(os.listdir(top))
    if os.path.isdir(top):
        rec_file_list = list(
            filter(lambda x: x.endswith(('.tiff', '.tif')), os.listdir(top)))
        rec_file_list.sort()

        log.info('found in %s' % top)
        log.info('files %s' % rec_file_list)
        log.info('applying segmentation')
        log.warning('not implemented')
    else:
        log.error("ERROR: the directory %s does not exist" % top)
        log.error("ERROR: to create one run a full reconstruction first:")
        log.error(
            "ERROR: $ tomopy recon --reconstruction-type full --hdf-file %s" %
            params.hdf_file)
Example #28
def remove_stripe(data, params):

    log.info('  *** remove stripe:')
    if (params.remove_stripe_method == 'fw'):
        log.info('  *** *** fourier wavelet')
        data = tomopy.remove_stripe_fw(data,
                                       level=params.fw_level,
                                       wname=params.fw_filter,
                                       sigma=params.fw_sigma,
                                       pad=params.fw_pad)
        log.info('  *** ***  *** fw level %d ' % params.fw_level)
        log.info('  *** ***  *** fw wname %s ' % params.fw_filter)
        log.info('  *** ***  *** fw sigma %f ' % params.fw_sigma)
        log.info('  *** ***  *** fw pad %r ' % params.fw_pad)
    elif (params.remove_stripe_method == 'ti'):
        log.info('  *** *** titarenko')
        data = tomopy.remove_stripe_ti(data,
                                       nblock=params.ti_nblock,
                                       alpha=params.ti_alpha)
        log.info('  *** ***  *** ti nblock %d ' % params.ti_nblock)
        log.info('  *** ***  *** ti alpha %f ' % params.ti_alpha)
    elif (params.remove_stripe_method == 'sf'):
        log.info('  *** *** smoothing filter')
        data = tomopy.remove_stripe_sf(data, size=params.sf_size)
        log.info('  *** ***  *** sf size %d ' % params.sf_size)
    elif (params.remove_stripe_method == 'none'):
        log.warning('  *** *** OFF')

    return data
Example #29
def phase_retrieval(data, params):

    log.info("  *** retrieve phase")
    if (params.retrieve_phase_method == 'paganin'):
        log.info('  *** *** paganin')
        log.info("  *** *** pixel size: %s" % params.pixel_size)
        log.info("  *** *** sample detector distance: %s" %
                 params.propagation_distance)
        log.info("  *** *** energy: %s" % params.energy)
        log.info("  *** *** alpha: %s" % params.retrieve_phase_alpha)
        data = tomopy.retrieve_phase(data,
                                     pixel_size=(params.pixel_size * 1e-4),
                                     dist=(params.propagation_distance / 10.0),
                                     energy=params.energy,
                                     alpha=params.retrieve_phase_alpha,
                                     pad=True)
    elif (params.retrieve_phase_method == 'none'):
        log.warning('  *** *** OFF')

    return data
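
The scaling in the call above implies a pixel size in microns and a propagation distance in mm being converted before the call, assuming tomopy.retrieve_phase expects cm; a tiny sketch with assumed values:

pixel_size_um = 1.4              # hypothetical params.pixel_size, in microns
propagation_distance_mm = 100.0  # hypothetical params.propagation_distance, in mm

pixel_size_cm = pixel_size_um * 1e-4
dist_cm = propagation_distance_mm / 10.0
print(pixel_size_cm, dist_cm)    # roughly 0.00014 cm and 10.0 cm
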
Example #30
def rec(params):

    data_shape = file_io.get_dx_dims(params)

    if params.rotation_axis < 0:
        params.rotation_axis = data_shape[2] / 2

    # Select sinogram range to reconstruct
    if (params.reconstruction_type == "full"):
        nSino_per_chunk = params.nsino_per_chunk
        chunks = int(np.ceil(data_shape[1] / nSino_per_chunk))
        sino_start = 0
        sino_end = chunks * nSino_per_chunk

    else:  # "slice" and "try"
        nSino_per_chunk = pow(2, int(params.binning))
        chunks = 1
        ssino = int(data_shape[1] * params.nsino)
        sino_start = ssino
        sino_end = sino_start + pow(2, int(params.binning))


    log.info("reconstructing [%d] slices from slice [%d] to [%d] in [%d] chunks of [%d] slices each" % \
               ((sino_end - sino_start)/pow(2, int(params.binning)), sino_start/pow(2, int(params.binning)), sino_end/pow(2, int(params.binning)), \
               chunks, nSino_per_chunk/pow(2, int(params.binning))))

    strt = 0
    for iChunk in range(0, chunks):
        log.info('chunk # %i/%i' % (iChunk, chunks))
        sino_chunk_start = int(sino_start + nSino_per_chunk * iChunk)
        sino_chunk_end = int(sino_start + nSino_per_chunk * (iChunk + 1))
        log.info('  *** [%i, %i]' %
                 (sino_chunk_start / pow(2, int(params.binning)),
                  sino_chunk_end / pow(2, int(params.binning))))

        if sino_chunk_end > sino_end:
            break

        sino = (int(sino_chunk_start), int(sino_chunk_end))

        # Read APS 32-BM raw data.
        proj, flat, dark, theta, rotation_axis = file_io.read_tomo(
            sino, params)

        # apply all preprocessing functions
        data = prep.all(proj, flat, dark, params)

        # Reconstruct
        if (params.reconstruction_type == "try"):
            # try passes an array of rotation centers and this is only supported by gridrec
            reconstruction_algorithm_org = params.reconstruction_algorithm
            params.reconstruction_algorithm = 'gridrec'

            center_search_width = params.center_search_width / np.power(
                2, float(params.binning))
            center_range = (rotation_axis - center_search_width,
                            rotation_axis + center_search_width, 0.5)
            stack = np.empty(
                (len(np.arange(*center_range)), data_shape[0],
                 int(data_shape[2] / np.power(2, float(params.binning)))))
            index = 0
            for axis in np.arange(*center_range):
                stack[index] = data[:, 0, :]
                index = index + 1
            log.warning(
                '  reconstruct slice [%d] with rotation axis range [%.2f - %.2f] in [%.2f] pixel steps'
                % (ssino, center_range[0], center_range[1], center_range[2]))

            rotation_axis = np.arange(*center_range)
            rec = padded_rec(stack, theta, rotation_axis, params)

            # Save images to a temporary folder.
            fname = os.path.dirname(
                params.hdf_file
            ) + '_rec' + os.sep + 'try_center' + os.sep + file_io.path_base_name(
                params.hdf_file) + os.sep + 'recon_'
            index = 0
            for axis in np.arange(*center_range):
                rfname = fname + str('{0:.2f}'.format(
                    axis * np.power(2, float(params.binning))) + '.tiff')
                dxchange.write_tiff(rec[index], fname=rfname, overwrite=True)
                index = index + 1

            # restore original method
            params.reconstruction_algorithm = reconstruction_algorithm_org

        else:  # "slice" and "full"
            rec = padded_rec(data, theta, rotation_axis, params)

            # handling of the last chunk
            if (params.reconstruction_type == "full"):
                if (iChunk == chunks - 1):
                    log.info("handling of the last chunk")
                    log.info("  *** chunk # %d" % (chunks))
                    log.info("  *** last rec size %d" %
                             ((data_shape[1] -
                               (chunks - 1) * nSino_per_chunk) /
                              pow(2, int(params.binning))))
                    rec = rec[0:data_shape[1] -
                              (chunks - 1) * nSino_per_chunk, :, :]

            # Save images
            if (params.reconstruction_type == "full"):
                tail = os.sep + os.path.splitext(
                    os.path.basename(params.hdf_file))[0] + '_rec' + os.sep
                fname = os.path.dirname(
                    params.hdf_file) + '_rec' + tail + 'recon'
                dxchange.write_tiff_stack(rec, fname=fname, start=strt)
                strt += int(
                    (sino[1] - sino[0]) / np.power(2, float(params.binning)))
            if (params.reconstruction_type == "slice"):
                fname = os.path.dirname(
                    params.hdf_file
                ) + os.sep + 'slice_rec/recon_' + os.path.splitext(
                    os.path.basename(params.hdf_file))[0]
                dxchange.write_tiff_stack(rec, fname=fname, overwrite=False)

        log.info("  *** reconstructions: %s" % fname)