Example #1
def files_to_array(list_sample_files,
                   list_ref_files,
                   list_dark_files,
                   idx4crop=[0, -1, 0, -1]):

    img = wpu.crop_matrix_at_indexes(dxchange.read_tiff(list_sample_files[0]),
                                     idx4crop)

    (nlines, ncolums) = img.shape

    img_stack = np.zeros((len(list_sample_files), nlines, ncolums))
    ref_stack = img_stack * 0.0

    dark_im = img_stack[0, :, :] * 0.0

    for i in range(len(list_dark_files)):

        dark_im += wpu.crop_matrix_at_indexes(
            dxchange.read_tiff(list_dark_files[i]), idx4crop)

    for i in range(len(list_sample_files)):

        img_stack[i, :, :] = wpu.crop_matrix_at_indexes(
            dxchange.read_tiff(list_sample_files[i]), idx4crop) - dark_im

        ref_stack[i, :, :] = wpu.crop_matrix_at_indexes(
            dxchange.read_tiff(list_ref_files[i]), idx4crop) - dark_im

    return img_stack, ref_stack
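
A minimal usage sketch for the helper above, assuming `dxchange`, `numpy` and wavepy's `wpu` module are importable and that the placeholder file patterns below match real acquisition TIFFs:

import glob

sample_files = sorted(glob.glob('sample_*.tif'))   # placeholder patterns
ref_files = sorted(glob.glob('ref_*.tif'))
dark_files = sorted(glob.glob('dark_*.tif'))

img_stack, ref_stack = files_to_array(sample_files, ref_files, dark_files,
                                      idx4crop=[100, 900, 100, 900])
print(img_stack.shape, ref_stack.shape)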
Example #2
def _func(i):

    wpu.print_blue("MESSAGE: loop " + str(i) + ": " + \
                   listOfDataFiles[i])

    if idx == [0, -1, 0, -1]:
        img = dxchange.read_tiff(listOfDataFiles[i]) - dark_im
    else:
        img = wpu.crop_matrix_at_indexes(
            dxchange.read_tiff(listOfDataFiles[i]), idx) - dark_im

    pv, ph = period_harm_Vert, period_harm_Horz

    pv = int(period_harm_Vert / (sourceDistance + zvec[i] - zvec[0]) *
             sourceDistance)
    ph = int(period_harm_Horz / (sourceDistance + zvec[i] - zvec[0]) *
             sourceDistance)

    wgi.plot_harmonic_grid(img, [pv, ph], isFFT=False)

    plt.savefig('FFT_{:04.0f}_ms_'.format(tvec[i] * 1e3) +
                '{:04.0f}mm.png'.format(zvec[i] * 1e3))
    plt.show(block=False)
    plt.close()

    #    wgi.plot_harmonic_peak(img,
    #                           [pv, ph],
    #                           isFFT=False,
    #                           fname='FFT_peaks_{:04.0f}_ms_'.format(tvec[i]*1e3) +
    #                                   '{:04.0f}mm.png'.format(zvec[i]*1e3))
    #
    #
    #    plt.close()

    return wgi.visib_1st_harmonics(img, [pv, ph], searchRegion=10)
Example #3
def read_data(idd, step):

    name = '/home/beams/XYU/chip_pillar_interlace_2/projection_%06d.h5' % (idd)
    fid = h5py.File(name, 'r')
    # read scan positions
    tmp = fid['/positions'][::step] * 70976797.5332996
    scan = np.zeros([2, 1, tmp.shape[0]], dtype='float32', order='C')
    nscan = tmp.shape[0]
    scan[0, :, :] = tmp[:, 1] + 533
    scan[1, :, :] = tmp[:, 0] + 428

    ids = np.where((scan[0, 0] >= 0) * (scan[0, 0] < 1024) *
                   (scan[1, 0] >= 0) * (scan[1, 0] < 768))[0]
    scan = np.array(scan[:, :, ids], order='C')  # important!
    nscan = scan.shape[2]
    # import matplotlib.pyplot as plt
    # plt.plot(scan[1,0], scan[0,0],'.', color='blue')
    # plt.savefig('scan.png',dpi=500)
    data = np.zeros([1, nscan, 256, 256], dtype='float32')
    tmp = np.fft.fftshift(fid['data'][::step, :, :],
                          axes=(1, 2)).astype('float32')
    data[0] = tmp[ids]
    # read probe initial guess
    prb = np.ones([1, 256, 256], dtype='complex64')
    prbamp = dxchange.read_tiff(
        '/home/beams/XYU/chip_pillar_interlace_2/prbamp.tiff')
    prbangle = dxchange.read_tiff(
        '/home/beams/XYU/chip_pillar_interlace_2/prbangle.tiff')
    prb[0] = prbamp * np.exp(1j * prbangle)
    # initial guess for psi (can be read)
    psi = np.zeros([1, 768 + 256, 1024 + 256], dtype='complex64',
                   order='C') + 1
    return psi, prb, scan, data
Example #4
def tiff2hdf5(src_folder,
              dest_folder,
              dest_fname,
              pattern='recon_*.tiff',
              display_min=None,
              display_max=None,
              dtype='int8'):

    dest_fname = check_fname_ext(dest_fname, 'h5')
    filelist = glob.glob(os.path.join(src_folder, pattern))
    filelist = sorted(filelist)
    n_files = len(filelist)
    temp = dxchange.read_tiff(filelist[0])
    full_shape = np.array([n_files, temp.shape[0], temp.shape[1]])
    if rank == 0:
        if not os.path.exists(dest_folder):
            os.mkdir(dest_folder)
        f = h5py.File(os.path.join(dest_folder, dest_fname), 'a')
    comm.Barrier()
    if rank != 0:
        f = h5py.File(os.path.join(dest_folder, dest_fname), 'a')
    grp = f.create_group('exchange')
    grp.create_dataset('data', full_shape, dtype='float32')
    alloc_set = allocate_mpi_subsets(n_files, size)
    dset = f['exchange/data']
    for i in alloc_set[rank]:
        img = dxchange.read_tiff(filelist[i])
        dset[i, :, :] = img
        print('    Rank: {:d}, file: {:d}'.format(rank, i))
    comm.Barrier()
    hdf5_cast(os.path.join(dest_folder, dest_fname),
              display_min=display_min,
              display_max=display_max,
              dtype=dtype)
    return
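
The function above relies on module-level MPI state (`comm`, `rank`, `size`) and on package helpers (`allocate_mpi_subsets`, `hdf5_cast`) that are not shown here; a hedged sketch of the context it expects, with a hypothetical call, might look like:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# Hypothetical call: convert a folder of reconstructed slices into one HDF5 file.
# tiff2hdf5('recon_slices', 'hdf5_out', 'recon.h5', pattern='recon_*.tiff')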
Example #5
def files_to_array(list_sample_files, list_ref_files, list_dark_files,
                   idx4crop=[0, -1, 0, -1]):

    img = wpu.crop_matrix_at_indexes(dxchange.read_tiff(list_sample_files[0]),
                                     idx4crop)

    (nlines, ncolums) = img.shape

    img_stack = np.zeros((len(list_sample_files), nlines, ncolums))
    ref_stack = img_stack*0.0

    dark_im = img_stack[0, :, :]*0.0

    for i in range(len(list_dark_files)):

        dark_im += wpu.crop_matrix_at_indexes(dxchange.read_tiff(list_dark_files[i]),
                                              idx4crop)

    for i in range(len(list_sample_files)):

        img_stack[i, :, :] = wpu.crop_matrix_at_indexes(dxchange.read_tiff(list_sample_files[i]),
                                                        idx4crop) - dark_im

        ref_stack[i, :, :] = wpu.crop_matrix_at_indexes(dxchange.read_tiff(list_ref_files[i]),
                                                        idx4crop) - dark_im

    return img_stack, ref_stack
Example #6
    def create_dataset(self, dataset_file):
        """Create a dataset for testing this module.
        Only called when setUp detects that `dataset_file` has been deleted.
        """
        import dxchange

        delta = dxchange.read_tiff('data/delta-chip-128.tiff')[::4, ::4, ::4]
        beta = dxchange.read_tiff('data/beta-chip-128.tiff')[::4, ::4, ::4]
        self.original = delta + 1j * beta

        self.theta = np.linspace(0, 2 * np.pi, 16, endpoint=False)
        self.tilt = np.pi / 3

        self.data = tike.lamino.simulate(self.original, self.theta, self.tilt)
        assert self.data.shape == (16, 32, 32)
        assert self.data.dtype == 'complex64', self.data.dtype

        setup_data = [
            self.data,
            self.original,
            self.theta,
            self.tilt,
        ]

        with lzma.open(dataset_file, 'wb') as file:
            pickle.dump(setup_data, file)
Example #7
def load_tiff_stack(path, rand_batch_size=None):

    filelist = glob.glob(os.path.join(path, '*.tif*'))
    if rand_batch_size is not None:
        # list.sort() returns None; use sorted() so the sampled names are kept
        filelist = sorted(np.random.choice(filelist, rand_batch_size).tolist())
    temp = dxchange.read_tiff(filelist[0])
    shape = np.squeeze(temp).shape
    arr = np.zeros([len(filelist), shape[0], shape[1]])
    for (i, f) in enumerate(filelist):
        arr[i, :, :] = np.squeeze(dxchange.read_tiff(f))

    return arr
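
A short usage sketch, assuming a folder of .tif/.tiff projections (the path is a placeholder):

stack = load_tiff_stack('projections')                        # full stack, in glob order
subset = load_tiff_stack('projections', rand_batch_size=16)   # random subset of 16 frames
print(stack.shape, subset.shape)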
Example #8
    def read_raw_sinogram(self,
                          fname,
                          type='tiff',
                          center=None,
                          pixel_size=1,
                          fin_angle=180,
                          max_count=None,
                          **kwargs):
        """
        Read raw sinogram from file.
        :param fname: file name
        :param type: file format
        :param center: rotation center
        :param preprocess: whether or not to preprocess the sinogram to remove singularities
        :param pixel_size: pixel size (um)
        :param kwargs:
        :return:
        """

        if type == 'hdf5':
            slice = kwargs['slice']
            raw_sino = np.squeeze(
                dxchange.read_aps_32id(fname, sino=(slice, slice + 1)))
        else:
            raw_sino = dxchange.read_tiff(fname)
        raw_sino = np.copy(raw_sino)
        self.raw_sino = Sinogram(raw_sino,
                                 'raw',
                                 coords=center,
                                 center=center,
                                 normalize_bg=False,
                                 minus_log=False,
                                 fin_angle=fin_angle,
                                 max_count=max_count)
        self.pixel_size = pixel_size
Example #9
def omni_read(f_input, begin=None, end=None):
    '''support tiff, tiff stack, hdf5'''
    if not f_input:
        return None
    f_input = os.path.abspath(f_input)
    matches = re.match(
        r'^(?P<dirname>.*)/(?P<fname>.*)\.(?P<ext>[^:]*)($|:(?P<dataset>.*$))',
        f_input)
    # print(matches.groupdict())
    if matches['ext'] == 'tif' or matches['ext'] == 'tiff':
        if begin is not None and end is not None:
            data = dxchange.read_tiff_stack(f_input, ind=range(begin, end))
        else:
            # print(f_input)
            S, L = check_stack_len(f_input)
            # print(S,L)
            if L > 1:
                data = dxchange.read_tiff_stack(f_input, ind=range(S, S + L))
            else:
                data = dxchange.read_tiff(f_input)
    elif matches['ext'] == 'h5' or matches['ext'] == 'hdf5':
        tokens = f_input.split(':')
        dataset_name = tokens[1]
        f_input = h5py.File(tokens[0], 'r')
        data = np.asarray(f_input[tokens[1]])
    else:
        print('unsupported file type')
        data = None
    return data
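
Two hedged calls illustrating the file-name conventions the regular expression above parses (paths and dataset names are placeholders):

# TIFF stack: read frames 0..99 of an indexed series.
proj = omni_read('scan/proj_0000.tiff', begin=0, end=100)

# HDF5: the dataset path is appended to the file name after a colon.
data = omni_read('scan/fly_0001.h5:/exchange/data')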
Example #10
def find_center_single(sino_name,
                       search_range,
                       search_step=1,
                       preprocess_single=False,
                       method='entropy',
                       output_fname='center_pos.txt'):

    log = open(output_fname, 'a')
    center_st, center_end = search_range
    sino = dxchange.read_tiff(sino_name)
    if sino.ndim == 2:
        sino = sino.reshape([sino.shape[0], 1, sino.shape[1]])
    if preprocess_single:
        sino = preprocess(np.copy(sino))
    if method == 'manual':
        write_center(sino,
                     tomopy.angles(sino.shape[0]),
                     dpath='center',
                     cen_range=(center_st, center_end, search_step))
    elif method == 'vo':
        mid = sino.shape[2] / 2
        smin = (center_st - mid) * 2
        smax = (center_end - mid) * 2
        center = find_center_vo(sino, smin=smin, smax=smax, step=search_step)
        internal_print('Center is {}.'.format(center))
        log.write('{}\n'.format(center))
        log.close()
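
A hedged example call, assuming the tomopy helpers used above (`write_center`, `find_center_vo`) are importable and that the sinogram file name is a placeholder:

# Search rotation centers 950..1050 in 2-pixel steps with Vo's method;
# the result is appended to center_pos.txt.
find_center_single('sino_01000.tiff', (950, 1050), search_step=2, method='vo')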
Example #11
def _func(i):

    wpu.print_blue("MESSAGE: loop " + str(i) + ": " + listOfDataFiles[i])

    img = dxchange.read_tiff(listOfDataFiles[i])

    darkMeanValue = np.mean(wpu.crop_matrix_at_indexes(img, idx4cropDark))

    #TODO xshi, need to add option of input one value

    img = img - darkMeanValue  # calculate and remove dark
    img = wpu.crop_matrix_at_indexes(img, idx4crop)

    pv = int(period_harm_Vert / (sourceDistanceV + zvec[i]) *
             (sourceDistanceV + np.min(zvec)))
    ph = int(period_harm_Horz / (sourceDistanceH + zvec[i]) *
             (sourceDistanceH + np.min(zvec)))

    if plotFourierImages:

        wgi.plot_harmonic_grid(img, [pv, ph], isFFT=False)

        plt.savefig('FFT_{:.0f}mm.png'.format(zvec[i] * 1e3))
        plt.show(block=False)
        plt.close()

        wgi.plot_harmonic_peak(img, [pv, ph], isFFT=False)

        plt.savefig('FFT_peaks_{:.0f}mm.png'.format(zvec[i] * 1e3))
        plt.show(block=False)
        plt.close()

    return wgi.visib_1st_harmonics(img, [pv, ph],
                                   searchRegion=searchRegion,
                                   unFilterSize=unFilterSize)
Example #12
def retrieve_phase_far_field(src_fname,
                             save_path,
                             output_fname=None,
                             pad_length=256,
                             n_epoch=100,
                             learning_rate=0.001):

    # raw data is assumed to be centered at zero frequency
    prj_np = dxchange.read_tiff(src_fname)
    if output_fname is None:
        output_fname = os.path.basename(
            os.path.splitext(src_fname)[0]) + '_recon'

    # take modulus and inverse shift
    prj_np = ifftshift(np.sqrt(prj_np))

    obj_init = np.random.normal(50, 10, list(prj_np.shape) + [2])

    obj = tf.Variable(obj_init, dtype=tf.float32, name='obj')
    prj = tf.constant(prj_np, name='prj')

    obj_real = tf.cast(obj[:, :, 0], dtype=tf.complex64)
    obj_imag = tf.cast(obj[:, :, 1], dtype=tf.complex64)

    # obj_pad = tf.pad(obj, [[pad_length, pad_length], [pad_length, pad_length], [0, 0]], mode='SYMMETRIC')
    det = tf.fft2d(obj_real + 1j * obj_imag, name='detector_plane')

    loss = tf.reduce_mean(tf.squared_difference(tf.abs(det), prj, name='loss'))

    sess = tf.Session()

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    optimizer = optimizer.minimize(loss)

    sess.run(tf.global_variables_initializer())

    for i_epoch in range(n_epoch):
        t0 = time.time()
        _, current_loss = sess.run([optimizer, loss])
        print('Iteration {}: loss = {}, Δt = {} s.'.format(
            i_epoch, current_loss,
            time.time() - t0))

    det_final = sess.run(det)
    obj_final = sess.run(obj)
    res = np.linalg.norm(obj_final, 2, axis=2)
    dxchange.write_tiff(res,
                        os.path.join(save_path, output_fname),
                        dtype='float32',
                        overwrite=True)
    dxchange.write_tiff(fftshift(np.angle(det_final)),
                        os.path.join(save_path, 'detector_phase'),
                        dtype='float32',
                        overwrite=True)
    dxchange.write_tiff(fftshift(np.abs(det_final)**2),
                        os.path.join(save_path, 'detector_mag'),
                        dtype='float32',
                        overwrite=True)

    return
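
A hedged call for the TensorFlow 1.x routine above; the input TIFF (a far-field intensity pattern centered at zero frequency) and the output folder are placeholders:

retrieve_phase_far_field('data/diffraction_pattern.tiff', 'recon_far_field',
                         n_epoch=300, learning_rate=1e-3)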
Example #13
def search_in_folder_dnn(dest_folder,
                         window=((600, 600), (1300, 1300)),
                         dim_img=128,
                         seed=1337,
                         batch_size=50):

    patch_size = (dim_img, dim_img)
    nb_classes = 2
    save_intermediate = False
    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 3
    nb_evl = 100

    fnames = glob.glob(os.path.join(dest_folder, '*.tiff'))
    fnames = np.sort(fnames)

    mdl = model(dim_img, nb_filters, nb_conv, nb_classes)

    mdl.load_weights('weight_center.h5')
    start_time = time.time()
    Y_score = np.zeros((len(fnames)))

    for i in range(len(fnames)):
        print(fnames[i])
        img = dxchange.read_tiff(fnames[i])
        X_evl = np.zeros((nb_evl, dim_img, dim_img))

        for j in range(nb_evl):
            X_evl[j] = xlearn.img_window(img[window[0][0]:window[1][0],
                                             window[0][1]:window[1][1]],
                                         dim_img,
                                         reject_bg=True,
                                         threshold=1.2e-4,
                                         reset_random_seed=True,
                                         random_seed=j)
        X_evl = xlearn.convolve_stack(X_evl, xlearn.get_gradient_kernel())
        X_evl = xlearn.nor_data(X_evl)
        if save_intermediate:
            dxchange.write_tiff(X_evl,
                                os.path.join('debug', 'x_evl',
                                             'x_evl_{}'.format(i)),
                                dtype='float32',
                                overwrite=True)
        X_evl = X_evl.reshape(X_evl.shape[0], 1, dim_img, dim_img)

        Y_evl = mdl.predict(X_evl, batch_size=batch_size)
        Y_score[i] = sum(np.dot(Y_evl, [0, 1]))
        # print('The evaluate score is:', Y_score[i])
        # Y_score = sum(np.round(Y_score))/len(Y_score)

    ind_max = np.argmax(Y_score)
    # use the basename so the center value encoded in the file name parses as a float
    best_center = float(os.path.splitext(os.path.basename(fnames[ind_max]))[0])
    print('Center search done in {} s. Optimal center is {}.'.format(
        time.time() - start_time, best_center))

    return best_center
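
A hedged example, assuming `dest_folder` holds trial reconstructions whose file names encode the candidate center (e.g. written by tomopy's write_center) and that `weight_center.h5` sits in the working directory:

best_center = search_in_folder_dnn('center_test',
                                   window=((600, 600), (1300, 1300)),
                                   dim_img=128,
                                   batch_size=50)
print('Best center:', best_center)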
Example #14
def _func(i):

    wpu.print_blue("MESSAGE: loop " + str(i) + ": " + \
                   listOfDataFiles[i])

    if idx == [0, -1, 0, -1]:
        img = dxchange.read_tiff(listOfDataFiles[i]) - dark_im
    else:
        img = wpu.crop_matrix_at_indexes(
            dxchange.read_tiff(listOfDataFiles[i]), idx) - dark_im

    pv, ph = period_harm_Vert, period_harm_Horz

    pv = int(period_harm_Vert / (sourceDistance + zvec[i] - zvec[0]) *
             sourceDistance)
    ph = int(period_harm_Horz / (sourceDistance + zvec[i] - zvec[0]) *
             sourceDistance)

    return wgi.visib_1st_harmonics(img, [pv, ph], searchRegion=10)
Example #15
    def setUp(self):
        """Load the test dataset from the disk."""
        # Model parameters
        self.n = 128  # object size n x,y
        self.nz = 128  # object size in z
        self.ntheta = 128 * 3 // 2  # number of angles (rotations)
        self.center = self.n / 2  # rotation center
        self.theta = np.linspace(0, np.pi,
                                 self.ntheta).astype('float32')  # angles

        self.niter = 128  # tomography iterations
        self.pnz = 128  # number of slice partitions
        # Load object
        beta = dxchange.read_tiff(
            os.path.join(testdir, 'data', 'beta-chip-128.tiff'))
        delta = dxchange.read_tiff(
            os.path.join(testdir, 'data', 'delta-chip-128.tiff'))
        self.u0 = delta + 1j * beta
Example #16
def minimum_entropy(folder, pattern='*.tiff', range=(-0.002, 0.002)):

    flist = glob.glob(os.path.join(folder, pattern))
    a = []
    s = []
    for fname in flist:
        img = dxchange.read_tiff(fname)
        s.append(entropy(img, range=range))
        a.append(fname)
    return a[np.argmin(s)]
Example #17
def red_stack_tiff(path):
    # path0 = 'D:/pycharm/pycharm/py/resig/data/shapp3d_160/'
    files = os.listdir(path)
    prj = []
    # prj0 = np.zeros((len(files), size, size))
    for file in files:
        if is_image_file(file):
            p = dxchange.read_tiff(os.path.join(path, file))
            prj.append(p)
    pr = np.array(prj)
    return pr
Example #18
def minimum_entropy(folder, pattern='*.tiff', range=None, mask_ratio=0.9, window=None, ring_removal=True,
                    center_x=None, center_y=None, reliability_screening=False, verbose=False):

    flist = glob.glob(os.path.join(folder, pattern))
    flist.sort()
    a = []
    s = []
    if range is None:
        temp = dxchange.read_tiff(flist[int(len(flist) / 2)])
        temp = temp.copy()
        temp_std = np.std(temp)
        temp_mean = np.mean(temp)
        temp[np.where(temp > (temp_mean + temp_std * 10))] = temp_mean
        temp[np.where(temp < (temp_mean - temp_std * 10))] = temp_mean
        hist_min = temp.min()
        hist_min = hist_min * 2 if hist_min < 0 else hist_min * 0.5
        hist_max = temp.max()
        hist_max = hist_max * 2 if hist_max > 0 else hist_max * 0.5
        range = (hist_min, hist_max)
        print('Auto-determined histogram range is ({}, {}).'.format(hist_min, hist_max))
    for fname in flist:
        if verbose:
            print(fname)
        img = dxchange.read_tiff(fname)
        # if max(img.shape) > 1000:
        #     img = scipy.misc.imresize(img, 1000. / max(img.shape), mode='F')
        # if ring_removal:
        #     img = np.squeeze(tomopy.remove_ring(img[np.newaxis, :, :]))
        s.append(entropy(img, range=range, mask_ratio=mask_ratio, window=window, ring_removal=ring_removal,
                         center_x=center_x, center_y=center_y))
        a.append(fname)
        gc.collect()
    if reliability_screening:
        if a[np.argmin(s)] in [flist[0], flist[-1]]:
            return None
        elif abs(np.min(s) - np.mean(s)) < 0.2 * np.std(s):
            return None
        else:
            return float(os.path.splitext(os.path.basename(a[np.argmin(s)]))[0])
    else:
        return float(os.path.splitext(os.path.basename(a[np.argmin(s)]))[0])
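
A hedged call of the screening variant above; the folder of trial-center slices is a placeholder, and a return value of None means the entropy minimum was judged unreliable:

center = minimum_entropy('center_test', pattern='*.tiff',
                         mask_ratio=0.9, ring_removal=True,
                         reliability_screening=True, verbose=True)
if center is None:
    print('Entropy minimum not reliable; widen the search range.')
else:
    print('Best center:', center)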
Example #20
def fourier_ring_correlation(obj,
                             ref,
                             step_size=1,
                             save_path='frc',
                             save_mask=False):

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    radius_max = int(min(obj.shape) / 2)
    f_obj = np_fftshift(fft2(obj))
    f_ref = np_fftshift(fft2(ref))
    f_prod = f_obj * np.conjugate(f_ref)
    f_obj_2 = np.real(f_obj * np.conjugate(f_obj))
    f_ref_2 = np.real(f_ref * np.conjugate(f_ref))
    radius_ls = np.arange(1, radius_max, step_size)
    fsc_ls = []
    np.save(os.path.join(save_path, 'radii.npy'), radius_ls)

    for rad in radius_ls:
        print(rad)
        if os.path.exists(
                os.path.join(save_path,
                             'mask_rad_{:04d}.tiff'.format(int(rad)))):
            mask = dxchange.read_tiff(
                os.path.join(save_path,
                             'mask_rad_{:04d}.tiff'.format(int(rad))))
        else:
            mask = generate_ring(obj.shape, rad, anti_aliasing=2)
            if save_mask:
                dxchange.write_tiff(
                    mask,
                    os.path.join(save_path,
                                 'mask_rad_{:04d}.tiff'.format(int(rad))),
                    dtype='float32',
                    overwrite=True)
        fsc = abs(np.sum(f_prod * mask))
        fsc /= np.sqrt(np.sum(f_obj_2 * mask) * np.sum(f_ref_2 * mask))
        fsc_ls.append(fsc)
        np.save(os.path.join(save_path, 'fsc.npy'), fsc_ls)

    matplotlib.rcParams['pdf.fonttype'] = 'truetype'
    fontProperties = {
        'family': 'serif',
        'serif': ['Times New Roman'],
        'weight': 'normal',
        'size': 12
    }
    plt.rc('font', **fontProperties)
    plt.plot(radius_ls.astype(float) / radius_ls[-1], fsc_ls)
    plt.xlabel('Spatial frequency (1 / Nyquist)')
    plt.ylabel('FRC')
    plt.savefig(os.path.join(save_path, 'frc.pdf'), format='pdf')
Example #21
def global_histogram(dmin, dmax, n_bins, plot=True):
    tiff_list = glob.glob('*.tiff')
    mybins = np.linspace(dmin, dmax, n_bins + 1)
    myhist = np.zeros(n_bins, dtype='int32')
    bin_width = (dmax - dmin) / n_bins
    for fname in tiff_list:
        print('Now analyzing ' + fname)
        temp = dxchange.read_tiff(fname).flatten()
        myhist = myhist + np.histogram(temp, bins=mybins)[0]
    if plot:
        plt.bar(mybins[:-1], myhist, width=bin_width)
        plt.show()
    return myhist, mybins
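
A small usage sketch; the value range is a placeholder appropriate for reconstructed attenuation slices in the current directory:

import numpy as np

hist, bins = global_histogram(-0.002, 0.002, n_bins=200, plot=False)
print('Most populated bin starts at', bins[np.argmax(hist)])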
Example #22
def parse_source_folder(src_dir, prefix):
    flist = glob.glob(os.path.join(src_dir, prefix + '*.tif*'))
    raw_img = np.squeeze(dxchange.read_tiff(flist[0]))
    raw_img_shape = raw_img.shape
    theta_full_ls = []
    dist_ls = []
    for f in flist:
        i_theta = int(re.findall(r'\d+', f)[-2])
        i_dist = int(re.findall(r'\d+', f)[-1])
        theta_full_ls.append(i_theta)
        dist_ls.append(i_dist)
    theta_ls = np.unique(theta_full_ls)
    n_theta = len(theta_ls)
    n_dist = len(flist) // n_theta
    ind_ls = np.array(theta_full_ls) * n_dist + np.array(dist_ls)
    flist = np.array(flist)[np.argsort(ind_ls)]
    return flist, n_theta, n_dist, raw_img_shape
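
A hedged example; it assumes the file names end with two integer groups, the projection-angle index followed by the distance index (e.g. 'img_theta_0012_dist_03.tiff'):

flist, n_theta, n_dist, shape = parse_source_folder('data_rescaled_registered', '*')
print(n_theta, 'angles x', n_dist, 'distances, frame shape', shape)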
Example #23
def reorganize_tiffs():
    tiff_list = glob.glob('*.tiff')
    for fname in tiff_list:
        print('Now processing ' + str(fname))
        # make downsampled subdirectories
        for ds in [1, 2, 4]:
            # create downsample folder if not existing
            folder_name = 'tiff_' + str(ds) + 'x'
            if not os.path.exists(folder_name):
                os.mkdir(folder_name)
            # copy file if downsample level is 1
            if ds == 1:
                shutil.copyfile(fname, folder_name + '/' + fname)
            # otherwise perform downsampling
            else:
                temp = dxchange.read_tiff(fname)  # keep the 2D frame for downsampling
                temp = image_downsample(temp, ds)
                dxchange.write_tiff(temp, folder_name + '/' + fname)
Example #24
def fourier_ring_correlation(obj, ref, step_size=1, save_path=None, save_mask=True, save_fname='fsc', threshold_curve=False):

    if save_path is not None and not os.path.exists(save_path):
        os.makedirs(save_path)

    if obj.ndim == 2:
        fft_func = fft2
        gen_mask = generate_ring
        gen_kwargs = {'anti_aliasing': 2}
    elif obj.ndim == 3:
        fft_func = fftn
        gen_mask = generate_shell
        gen_kwargs = {}

    radius_max = min(obj.shape) // 2
    f_obj = np_fftshift(fft_func(obj))
    f_ref = np_fftshift(fft_func(ref))
    f_prod = f_obj * np.conjugate(f_ref)
    f_obj_2 = np.real(f_obj * np.conjugate(f_obj))
    f_ref_2 = np.real(f_ref * np.conjugate(f_ref))
    radius_ls = np.arange(1, radius_max, step_size)
    fsc_ls = []
    if save_path is not None:
        np.save(os.path.join(save_path, 'radii.npy'), radius_ls)

    for rad in radius_ls:
        if os.path.exists(os.path.join(save_path, 'mask_rad_{:04d}.tiff'.format(int(rad)))):
            mask = dxchange.read_tiff(os.path.join(save_path, 'mask_rad_{:04d}.tiff'.format(int(rad))))
        else:
            mask = gen_mask(obj.shape, rad, **gen_kwargs)
            if save_mask:
                dxchange.write_tiff(mask, os.path.join(save_path, 'mask_rad_{:04d}.tiff'.format(int(rad))),
                                    dtype='float32', overwrite=True)
        fsc = abs(np.sum(f_prod * mask))
        fsc /= np.sqrt(np.sum(f_obj_2 * mask) * np.sum(f_ref_2 * mask))
        fsc_ls.append(fsc)
        if save_path is not None:
            np.save(os.path.join(save_path, '{}.npy'.format(save_fname)), fsc_ls)
    return np.array(fsc_ls)
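
A hedged smoke test of the function above with synthetic arrays; `np_fftshift`, `fft2`, `fftn`, `generate_ring` and `generate_shell` are assumed to come from the same utility module as the function itself:

import numpy as np

obj = np.random.rand(256, 256)
ref = obj + 0.05 * np.random.rand(256, 256)   # a slightly perturbed copy
frc = fourier_ring_correlation(obj, ref, step_size=2,
                               save_path='frc_test', save_mask=False)
print(frc.shape, frc[:5])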
Example #25
patch_size = (dim_img, dim_img)

mdl = model(dim_img, nb_filters, nb_conv)
mdl.load_weights('transform_training_weights.h5')

print('Predicting')

folder = '../../test/test_data/'
files = [f for f in sorted(os.listdir(folder)) if re.match(r'.+.tiff', f)]

for fname in files:
    time_start = time.time()
    sname = fname.split('.')
    fname_save = folder + sname[0] + '_result'
    img_test = dxchange.read_tiff(folder + fname)
    img_rec = predict(mdl, img_test, patch_size, patch_step, batch_size, dim_img)
    dxchange.write_tiff(img_rec, fname_save, dtype='float32')
    print(time.time()-time_start)
Example #26
import sys

import dxchange
import numpy as np


def find_min_max(data):
    # The top of this example was truncated in extraction; the imports, the
    # signature and the zero-initialised outputs below are assumptions based on
    # the call further down (mmin, mmax = find_min_max(data)).
    mmin = np.zeros(data.shape[0], dtype='float32')
    mmax = np.zeros(data.shape[0], dtype='float32')

    for k in range(data.shape[0]):
        h, e = np.histogram(data[k][:], 1000)
        stend = np.where(h > np.max(h) * 0.005)
        st = stend[0][0]
        end = stend[0][-1]
        mmin[k] = e[st]
        mmax[k] = e[end + 1]

    return mmin, mmax


if __name__ == "__main__":

    # read data and angles
    data = dxchange.read_tiff(
        '/home/beams0/VNIKITIN/lamino_doga/lamalign/data/matlab_rec/matlab-recon.tif'
    ).astype('float32')  #[:, ::4, ::4]
    theta = np.load(
        '/home/beams0/VNIKITIN/lamino_doga/lamalign/data/matlab_rec/angle.npy'
    ).astype('float32') / 180 * np.pi
    idset = int(sys.argv[1])
    data = data[idset::2]
    theta = theta[idset::2]
    #ids_bad = np.array([29,44,56,102,152])
    phi = 61.18 / 180 * np.pi
    det = data.shape[2]
    ntheta = data.shape[0]

    # normalize data for optical flow computations
    mmin, mmax = find_min_max(data)
Example #27
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import dxchange

# data_folder = '/raid/home/mingdu/data/VS72_Again_180_25kev_lens10x_dfocus12cm_76_y1_x0/localtomo'
data_folder = '/raid/home/mingdu/data/shirley/local_tomo'
# data_folder = '/raid/home/mingdu/data/charcoal/local_tomo'
# data_folder = '/raid/home/mingdu/data/SAMPLE_03/panos'
# full_proj_fname = 'proj_raw_mlog.tiff'
full_proj_fname = '0_norm.tiff'
# full_proj_fname = 'frame0900-2.tiff'
tile_size = (1200, 1920)
half_tile_size = np.floor((np.array(tile_size) / 2)).astype('int')
shift = 1700
central_slice = 10068
full_proj = dxchange.read_tiff(os.path.join(data_folder, full_proj_fname))
full_proj = np.squeeze(full_proj)
# pos_ls = range(0, full_proj.shape[-1] - tile_size[0] + 1, shift)
pos_ls = range(0, 12816 - tile_size[0] + 1, shift)
photon_multiplier_ls = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]

mean_diff_ls = []

for ph_mult in photon_multiplier_ls:
    print('Multiplier: {}'.format(ph_mult))
    abs_diff_ls = []
    for i, pos in enumerate(pos_ls):
        tile = full_proj[central_slice - half_tile_size[0]:central_slice +
                         half_tile_size[0], pos:pos + tile_size[1]]
        tile = np.exp(-tile) * ph_mult
        # dxchange.write_tiff(tile, os.path.join(data_folder, 'proj_tiles', '{}'.format(ph_mult), '{:02d}_poi'.format(i)),
Example #28
__authors__ = "Walan Grizolli"
__copyright__ = "Copyright (c) 2016, Affiliation"
__version__ = "0.1.0"


# =============================================================================
# %% preamble. Load parameters from ini file
# =============================================================================


inifname = '.speckleAnalyses.ini'

config, ini_pars, ini_file_list = wpu.load_ini_file_terminal_dialog(inifname)

fname = ini_file_list.get('image_filename')
image = dxchange.read_tiff(fname)
image_ref = dxchange.read_tiff(ini_file_list.get('ref_filename'))

idx = list(map(int, ini_pars.get('crop').split(',')))
pixelsize = float(ini_pars.get('pixel size'))
phenergy = float(ini_pars.get('photon energy'))
distDet2sample = float(ini_pars.get('distance detector to sample'))
halfsubwidth = int(ini_pars.get('halfsubwidth'))
halfTemplateSize = int(ini_pars.get('halfTemplateSize'))
subpixelResolution = int(ini_pars.get('subpixelResolution'))
npointsmax = int(ini_pars.get('npointsmax'))
ncores = float(ini_pars.get('ncores')) / float(ini_pars.get('ncores of machine'))
saveH5 = ini_pars.get('save hdf5 files')

if subpixelResolution < 1: subpixelResolution = None
if halfTemplateSize < 1: halfTemplateSize = None
Example #29
import signal
import sys

import cupy as cp
import dxchange
import numpy as np
import tomocg as pt

if __name__ == "__main__":

    # Model parameters
    n = 128  # object size n x,y
    nz = 128  # object size in z
    ntheta = 128  # number of angles (rotations)
    center = n / 2  # rotation center
    theta = np.linspace(0, np.pi, ntheta).astype('float32')  # angles
    niter = 64  # tomography iterations
    pnz = 32  # number of slice partitions for simultaneous processing in tomography
    # Load object
    beta = dxchange.read_tiff('data/beta-chip-128.tiff')
    delta = dxchange.read_tiff('data/delta-chip-128.tiff')
    u0 = delta + 1j * beta

    # Class gpu solver
    with pt.SolverTomo(theta, ntheta, nz, n, pnz, center) as slv:
        # generate data
        data = slv.fwd_tomo_batch(u0)
        # adjoint test
        u1 = slv.adj_tomo_batch(data)
        print('Adjoint test: ', np.sum(data * np.conj(data)), '=?',
              np.sum(u0 * np.conj(u1)))
Example #30
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', default='None')
parser.add_argument('--save_path', default='cell/ptychography')
parser.add_argument('--output_folder', default='test') # Will create epoch folders under this
args = parser.parse_args()
epoch = args.epoch
if epoch == 'None':
    epoch = 0
    init = None
else:
    epoch = int(epoch)
    if epoch == 0:
        init = None
    else:
        init_delta = dxchange.read_tiff(os.path.join(args.save_path, args.output_folder, 'epoch_{}/delta_ds_1.tiff'.format(epoch - 1)))
        init_beta = dxchange.read_tiff(os.path.join(args.save_path, args.output_folder, 'epoch_{}/beta_ds_1.tiff'.format(epoch - 1)))
        print(os.path.join(args.save_path, args.output_folder, 'epoch_{}/delta_ds_1.tiff'.format(epoch - 1)))
        init = [np.array(init_delta[...]), np.array(init_beta[...])]


params_2d_cell = {'fname': 'data_cell_phase.h5',
                    'theta_st': 0,
                    'theta_end': 0,
                    'theta_downsample': 1,
                    'n_epochs': 200,
                    'obj_size': (325, 325, 1),
                    'alpha_d': 0,
                    'alpha_b': 0,
                    'gamma': 0,
                    'probe_size': (72, 72),
Example #31
"""
@author: Hannah
"""


import dxchange
import matplotlib.pyplot as plt

from xlearn.transform import train
from xlearn.transform import model

batch_size=800
nb_epoch=10
dim_img=20
nb_filters=32
nb_conv=3
patch_step=4
patch_size=(dim_img, dim_img)

img_x=dxchange.read_tiff('C:/Users/Hannah/Downloads/python programs/original_input/image235/235_normal.tif')
img_y=dxchange.read_tiff('C:/Users/Hannah/Downloads/python programs/original_input/image235/235_label.tif')

plt.imshow(img_x, cmap='Greys_r')
plt.show()

plt.imshow(img_y, cmap='Greys_r')
plt.show()

mdl=train(img_x, img_y, patch_size, patch_step, dim_img, nb_filters, nb_conv, batch_size, nb_epoch)
mdl.save_weights('training_weights.h5')
Example #32
import dxchange
import numpy as np
from transform import train, predict


batch_size = 400
nb_epoch = 50
dim_img = 64
nb_filters = 40
nb_conv = 3
patch_step = 1
patch_size = (dim_img, dim_img)
smooth = 1.

test1 = dxchange.read_tiff('/home/beams/YANGX/ptychography/datatolearn/lr_0034.tiff')
ih, iw = test1.shape
img_prd = np.zeros((ih, iw))
wpath = 'th_weights/full2.h5'
# use floor division so the slice indices stay integers under Python 3
img1 = test1[:ih//2, :iw//2]
img_rec1 = predict(img1, patch_size, patch_step, nb_filters, nb_conv, batch_size, dim_img, wpath)
img_prd[:ih//2, :iw//2] = img_rec1

img1 = test1[ih//2+1:, iw//2+1:]
img_rec1 = predict(img1, patch_size, patch_step, nb_filters, nb_conv, batch_size, dim_img, wpath)
img_prd[ih//2+1:, iw//2+1:] = img_rec1

img1 = test1[:ih//2, iw//2+1:]
img_rec1 = predict(img1, patch_size, patch_step, nb_filters, nb_conv, batch_size, dim_img, wpath)
img_prd[:ih//2, iw//2+1:] = img_rec1

img1 = test1[ih//2+1:, :iw//2]
Example #33
from utils.image_io import *
from skimage.measure import compare_psnr
import numpy as np
import torch
import torch.nn as nn
from collections import namedtuple
import dxchange
import matplotlib

matplotlib.use('TkAgg')
import os
from crosstalk_separation import *

if __name__ == "__main__":

    img = dxchange.read_tiff('data/au_ni.tiff')
    img = img[114:114 + 272, 114:-114, :]
    img = np.transpose(img, (2, 0, 1))
    img = normalize(img)

    # fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    # axes[0].imshow(img[0])
    # axes[1].imshow(img[1])
    # plt.show()

    input1 = img[0].reshape([1, *img[0].shape])
    input2 = img[1].reshape([1, *img[1].shape])

    # Separation from two images
    t = TwoImagesSeparation('input1', 'input2', input1, input2, num_iter=7000)
    t.optimize()
Example #34
rec_ax = plt.axes([0.15, 0.15, 0.65, 0.03], facecolor=axcolor)
rec_amp = Slider(rec_ax, 'Rec Scale', 0.1, 300.0, valinit=150)
rec_amp.on_changed(update)


proj_ax = plt.axes([0.15, 0.05, 0.65, 0.03], facecolor=axcolor)
proj_amp = Slider(proj_ax, 'Proj Scale', 0.1, 4000.0, valinit=2000)
proj_amp.on_changed(update)



while True:
  cnt += 1
  if cnt % 5 == 0:
    proj = dxchange.read_tiff(spath + 'proj.tif')
    if im1 is None:
        im1 = ax1.imshow(proj, cmap='gray', vmin=0, vmax=4000)
    else:
        im1.set_data(proj)
        im1.set_clim(0, proj_amp.val)
    fig.canvas.draw_idle()
    plt.pause(0.01)

  recon = dxchange.read_tiff(spath + 'recon.tif')
  print(recon.shape, rec_amp.val, proj_amp.val)

  if im2 is None:
      im2 = ax2.imshow(recon, cmap='gray', vmin=0, vmax=300)
  else:
      im2.set_data(recon)
Example #35
fname = '../../test/test_data/1038.tiff'
ind_uncenter1 = range(1038, 1047)
ind_uncenter2 = range(1049, 1057)
uncenter1 = dxchange.read_tiff_stack(fname, ind=ind_uncenter1, digit=4)
uncenter2 = dxchange.read_tiff_stack(fname, ind=ind_uncenter2, digit=4)
uncenter = np.concatenate((uncenter1, uncenter2), axis=0)
uncenter = nor_data(uncenter)
print (uncenter.shape)
uncenter = img_window(uncenter[:, 360:1460, 440:1440], 200)
print (uncenter.shape)
uncenter_patches = extract_3d(uncenter, patch_size, 1)
np.random.shuffle(uncenter_patches)
print (uncenter_patches.shape)
# print uncenter_patches.shape
center_img = dxchange.read_tiff('../../test/test_data/1048.tiff')
center_img = nor_data(center_img)
print (center_img.shape)
center_img = img_window(center_img[360:1460, 440:1440], 400)
center_patches = extract_3d(center_img, patch_size, 1)
np.random.shuffle(center_patches)
print (center_patches.shape)
# plt.imshow(center_img, cmap='gray', interpolation= None)
# plt.show()

x_train = np.concatenate((uncenter_patches[0:50000], center_patches[0:50000]), axis=0)
x_test = np.concatenate((uncenter_patches[50000:60000], center_patches[50000:60000]), axis=0)
x_train = x_train.reshape(x_train.shape[0], 1, dim_img, dim_img)
x_test = x_test.reshape(x_test.shape[0], 1, dim_img, dim_img)
y_train = np.zeros(100000)
y_train[50000:] = 1
Example #36
    det = [64, 64] # detector size
    ntheta = 256*3//2  # number of angles (rotations)
    noise = True  # apply discrete Poisson noise

    # Reconstrucion parameters
    modela = ['poisson','gaussian']  # minimization funcitonal (poisson,gaussian)
    alphaa = [1e-11,3e-7] # tv regularization penalty coefficient
    piter = 4  # ptychography iterations
    titer = 4  # tomography iterations
    NITER = 400  # ADMM iterations

    ptheta = 4 # NEW: number of angular partitions for simultaneous processing in ptychography
    initshift = 0.0# NEW: Initial phase shift bubles: 128:0.8, chip 128: 0.0046, 256:0.0081

    # Load a 3D object
    beta0 = dxchange.read_tiff('data/beta-pad2-256.tiff')[42:42+16]#[32:32+16*bbin:bbin,::bbin,::bbin]#:2,::2,::2]
    delta0 = -dxchange.read_tiff('data/delta-pad2-256.tiff')[42:42+16]#[32:32+16*bbin:bbin,::bbin,::bbin]#:2,::2,::2]
    beta = np.zeros([2*prbsize+beta0.shape[0],beta0.shape[1],beta0.shape[2]],dtype='float32')
    delta = np.zeros([2*prbsize+beta0.shape[0],beta0.shape[1],beta0.shape[2]],dtype='float32')    
    beta[prbsize:-prbsize] = beta0
    delta[prbsize:-prbsize] = delta0
    # print(beta.shape)

    maxint = maxinta[igpu]
    if(maxint>0.9):
        noise = False
    obj = cp.array(delta+1j*beta)
    prb = cp.array(objects.probe(prbsize, maxint))#,rout=1.03))
    theta = cp.linspace(0, np.pi, ntheta).astype('float32')
    scan = cp.array(objects.scanner3(theta, obj.shape, prbshift,
                                    prbshift, prbsize, spiral=0, randscan=True, save=False)) 
Example #37
src_dir = 'data_rescaled_registered'
prefix = '*'
psize_cm = 99.8e-7
dist_cm_ls = np.array([7.36, 7.42, 7.70])
energy_ev = 17500
alpha_1 = 5e-4
alpha_2 = 1e-16
energy_kev = energy_ev * 1e-3

flist, n_theta, n_dists, raw_img_shape = adorym.parse_source_folder(
    src_dir, prefix)

for i_theta in range(n_theta):
    prj_ls = []
    for i_dist in range(n_dists):
        img = np.squeeze(dxchange.read_tiff(flist[i_theta * n_dists + i_dist]))
        prj_ls.append(img)
    phase = multidistance_ctf(prj_ls,
                              dist_cm_ls,
                              psize_cm,
                              energy_kev,
                              kappa=50,
                              sigma_cut=0.01,
                              alpha_1=alpha_1,
                              alpha_2=alpha_2)
    dxchange.write_tiff(np.squeeze(phase),
                        os.path.join(
                            'data_ctf_orig',
                            os.path.basename(flist[i_theta * n_dists])),
                        dtype='float32',
                        overwrite=True)
Example #38
    nu_cut = 0.6 * u_max
    f = 0.5 * (1 - erf((abs_nu - nu_cut) / sigma_cut))
    alpha = alpha_1 * f + alpha_2 * (1 - f)
    # plt.imshow(abs(np.log(np_fftshift(fft2(prj_ls[0] - 1, axes=(-2, -1)), axes=(-2, -1)))))
    # plt.imshow(alpha)
    # plt.show()
    # alpha = 0
    phase = np.sum(
        np_fftshift(fft2(prj_ls - 1, axes=(-2, -1)), axes=(-2, -1)) *
        (np.sin(xi_ls) + 1. / kappa * np.cos(xi_ls)),
        axis=0)
    phase /= (
        np.sum(2 * (np.sin(xi_ls) + 1. / kappa * np.cos(xi_ls))**2, axis=0) +
        alpha)
    phase = ifft2(np_ifftshift(phase, axes=(-2, -1)), axes=(-2, -1))

    return np.abs(phase)


if __name__ == '__main__':

    import dxchange
    a = dxchange.read_tiff('data/cameraman_512_dp.tiff')
    a = tf.constant(a)
    sess = tf.Session()
    a = sess.run(gaussian_blur(a, 5, 2) - a)
    plt.imshow(a)
    plt.show()
    # print(sess.run(image_entropy(a)))
    # print(get_gaussian_kernel(5, sigma=2))
Example #40
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Example script 
"""

from __future__ import print_function
from convnet.transform import train
import dxchange


batch_size = 800
nb_epoch = 10
dim_img = 20
nb_filters = 32
nb_conv = 3
patch_step = 4

patch_size = (dim_img, dim_img)

# read the training data

img_x = dxchange.read_tiff('../../test/test_data/training_input.tiff')
img_y = dxchange.read_tiff('../../test/test_data/training_output.tiff')

# train and save the model
model = train(img_x, img_y, patch_size, patch_step, dim_img, nb_filters, nb_conv, batch_size, nb_epoch)
model.save_weights('transform_training_weights.h5')

Example #41
# %%

if __name__ == '__main__':

    # ==========================================================================
    # Experimental parameters
    # ==========================================================================

    (list_sample_files, list_ref_files, list_dark_files,
     pixelSize, stepSize) = intial_setup()

    # ==========================================================================
    # % % Load one image and crop
    # ==========================================================================

    img = dxchange.read_tiff(list_sample_files[0])

    [colorlimit,
     cmap] = wpu.plot_slide_colorbar(img, title='Raw Image',
                                     xlabel=r'x [$\mu m$ ]',
                                     ylabel=r'y [$\mu m$ ]',
                                     extent=wpu.extent_func(img, pixelSize)*1e6)

    img_croped, idx4crop = wpu.crop_graphic(zmatrix=img, verbose=True,
                                            kargs4graph={'cmap': cmap,
                                                         'vmin': colorlimit[0],
                                                         'vmax': colorlimit[1]})


    # ==========================================================================
    # %% Load tiff files to numpy array
Example #42
pixelsize = [0.65e-6, 0.65e-6]  # vertical and horizontal pixel sizes in meters
distDet2sample = 0.18600  # in meters
sourceDistance = 100.0  # in meters, for divergence correction. to ignore it, use a big number >100

phenergy = 8e3  # in eV
wavelength = wpu.hc/phenergy  # wpu has an alias for hc
kwave = 2*np.pi/wavelength


# Phase grating paremeters
gratingPeriod = 4.8e-6  # in meters
# uncomment proper pattern period:
patternPeriod = gratingPeriod/np.sqrt(2.0)  # if half Pi grating
#patternPeriod = gratingPeriod/2.0  # if Pi grating

img = dxchange.read_tiff('data_example_for_single_grating/cb4p8um_halfPi_8KeV_10s_img.tif')
imgRef = dxchange.read_tiff('data_example_for_single_grating/cb4p8um_halfPi_8KeV_10s_ref.tif')
darkImg = dxchange.read_tiff('data_example_for_single_grating/10s_dark.tif')

img = img - darkImg
imgRef = imgRef - darkImg

# %% crop

img, idx4crop = wpu.crop_graphic_image(img)
imgRef = wpu.crop_matrix_at_indexes(imgRef, idx4crop)

# %% Find harmonic in the Fourier images

# calculate the theoretical position of the hamonics
period_harm_Vert_o = np.int(pixelsize[0]/patternPeriod*img.shape[0] /
Example #43
mdl = model(dim_img, nb_filters, nb_conv, nb_classes)

mdl.load_weights('weight_center.h5')
print('The model loading time is %s seconds'%(time.time()-start_time))

Y_score = np.zeros((50, 501))
for i in range(50):
    slice_num = (i+2)*20
    datapath = '/home/oxygen/YANGX/Globus/center/test_04/slice'+str(slice_num)+'/*.tiff'
    # print(datapath)
    fnames = glob.glob(datapath)
    fnames = np.sort(fnames)
    # print(fnames)

    for j in range(len(fnames)):
        img = dxchange.read_tiff(fnames[j])
        img = -nor_data(img)
        # X_evl = np.zeros((nb_evl, dim_img, dim_img))
        # for k in range(nb_evl):
        #     X_evl[k] = img_window(img[360:1660, 440:1640], dim_img)
        X_evl = extract_patches(img[360:1660, 440:1640],
                                (128, 128), step=64, max_patches=None, random_state=None)
        X_evl = X_evl.reshape(X_evl.shape[0], 1, dim_img, dim_img)
        Y_evl = mdl.predict(X_evl, batch_size=batch_size)

        Y_score[i, j] = sum(np.dot(Y_evl, [0, 1]))
    # print(Y_score[i])

    #print('The evaluate score is:', Y_score[i])
    #Y_score = sum(np.round(Y_score))/len(Y_score)
    ind_max = np.argmax(Y_score[i, :])
Example #44
def _load_experimental_pars(argv):

    if len(argv) == 10:

        fname_img, fname_imgRef, fname_blank = argv[1:4]

        pixelsize = float(argv[4])*1e-6
        gratingPeriod = float(argv[5])*1e-6
        pattern = argv[6]
        distDet2sample = float(argv[7])*1e-3
        phenergy = float(argv[8])*1e3
        sourceDistance = float(argv[9])

        img, imgRef, blank = (dxchange.read_tiff(fname_img),
                              dxchange.read_tiff(fname_imgRef),
                              dxchange.read_tiff(fname_blank))

    elif len(argv) == 1:

        (fname_img, fname_imgRef, fname_blank,
         pixelsize, gratingPeriod, pattern, distDet2sample,
         phenergy, sourceDistance) = _intial_gui_setup(argv[0])

        img, imgRef, blank = (dxchange.read_tiff(fname_img),
                              dxchange.read_tiff(fname_imgRef),
                              dxchange.read_tiff(fname_blank))

    else:
        print('ERROR: wrong number of inputs: {} \n'.format(len(argv)-1) +
              'Usage: \n'
              '\n'
              'singleGratingTalbotImaging.py : (no inputs) load dialogs \n'
              '\n'
              'singleGratingTalbotImaging.py [args] \n'
              '\n'
              'arg1: file name main image\n'
              'arg2: file name reference image\n'
              'arg3: file name dark image\n'
              'arg4: pixel size [um]\n'
              'arg5: Check Board grating period [um]\n'
              "arg6: pattern, 'Edge pi' or 'Diagonal half pi' \n"
              'arg7: distance detector to CB Grating [mm]\n'
              'arg8: Photon Energy [KeV]\n'
              'arg9: Distance to the source [m], to correct for beam\n'
              '      divergence (use 1e5 to ignore this, which means\n'
              '      source at infinity and zero divergence)\n'
              '\n')

        exit(-1)

    img = img - blank
    imgRef = imgRef - blank

    pixelsize = [pixelsize, pixelsize]
    # change here if you need rectangular pixel

    if pattern == 'Diagonal half pi':
        gratingPeriod *= 1.0/np.sqrt(2.0)
        phaseShift = 'halfPi'

    elif pattern == 'Edge pi':
        gratingPeriod *= 1.0/2.0
        phaseShift = 'Pi'

    saveFileSuf = 'cb{:.2f}um_'.format(gratingPeriod*1e6)
    saveFileSuf += phaseShift
    saveFileSuf += '_d{:.0f}mm_'.format(distDet2sample*1e3)
    saveFileSuf += '{:.1f}KeV'.format(phenergy*1e-3)
    saveFileSuf = saveFileSuf.replace('.', 'p')

    return (img, imgRef, saveFileSuf,
            pixelsize, gratingPeriod, pattern,
            distDet2sample,
            phenergy, sourceDistance)
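
A hedged sketch of how the parser above is typically driven from a script entry point; the file names and values mirror the usage text printed in the error branch:

import sys

if __name__ == '__main__':
    # e.g. python singleGratingTalbotImaging.py sample.tif ref.tif dark.tif \
    #        0.65 4.8 'Diagonal half pi' 186 8.0 100
    (img, imgRef, saveFileSuf,
     pixelsize, gratingPeriod, pattern,
     distDet2sample, phenergy, sourceDistance) = _load_experimental_pars(sys.argv)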
Example #45
distDet2sample = 0.18600  # in meters
sourceDistance = 100.0  # in meters, for divergence correction. to ignore it, use a big number >100

phenergy = 8e3  # in eV
wavelength = wpu.hc/phenergy  # wpu has an alias for hc
kwave = 2*np.pi/wavelength



# Phase grating paremeters
gratingPeriod = 4.8e-6  # in meters
# uncomment proper pattern period:
patternPeriod = gratingPeriod/np.sqrt(2.0)  # if half Pi grating
#patternPeriod = gratingPeriod/2.0  # if Pi grating

img = dxchange.read_tiff('../../data4wavepy_examples/Dialens/cb_halfpi_4p8um_distace110mm_sample.tif')
imgRef = dxchange.read_tiff('../../data4wavepy_examples/Dialens/cb_halfpi_4p8um_distace110mm_ref.tif')
darkImg = dxchange.read_tiff('../../data4wavepy_examples/Dialens/dark.tif')

img = img - darkImg
imgRef = imgRef - darkImg



# %% Find harmonic in the Fourier images

# calculate the theoretical position of the hamonics
period_harm_Vert_o = np.int(pixelsize[0]/patternPeriod*img.shape[0])
period_harm_Hor_o = np.int(pixelsize[1]/patternPeriod*img.shape[1])

harmPeriod = [period_harm_Vert_o, period_harm_Hor_o]
Example #46
"""
Example script 
"""

from __future__ import print_function
from xlearn.transform import train
import dxchange


batch_size = 800
nb_epoch = 10
dim_img = 20
nb_filters = 32
nb_conv = 3
patch_step = 4

patch_size = (dim_img, dim_img)

# read the training data

train_folder ='/local/tomo/data/cnn/Al-10Sn-4Si-1Cu-Er-Zr/train/'

img_x = dxchange.read_tiff(train_folder + 'Slice_0563-segmentation.tiff')
img_y = dxchange.read_tiff(train_folder + 'Slice_0563.tiff')

# train and save the model
model = train(img_x, img_y, patch_size, patch_step, dim_img, nb_filters, nb_conv, batch_size, nb_epoch)
model.save_weights('transform_training_weights_al10.h5')