Example #1
 def _cost(self, X, y, weights):
     d = dropout(X, self.keep_prob)
     c = convolve(d,
                  weights[0],
                  mode='valid',
                  axes=([1, 2], [2, 3]),
                  dot_axes=([3], [1]))  # [batch_size, N, 30, 30]
     n = batch_normalization(c, weights[3], weights[4])
     r = relu(n)
     m = maxout(r)
     d = dropout(m, self.keep_prob)
     c = convolve(d,
                  weights[1],
                  mode='valid',
                  axes=([2, 3], [2, 3]),
                  dot_axes=([1], [1]))  # [batch_size, 2N, 12, 12]
     n = batch_normalization(c, weights[5], weights[6])
     r = relu(n)
     m = maxout(r)
     m = np.reshape(m, (X.shape[0], -1))
     d = dropout(m, self.keep_prob)
     z = np.dot(d, weights[2])
     return -np.sum(
         np.sum((z - logsumexp(z, axis=1, keepdims=True)) * y,
                axis=1)) / X.shape[0]
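The return value of `_cost` is the batch-averaged cross-entropy: `z - logsumexp(z, axis=1, keepdims=True)` is the log-softmax of the class scores, and multiplying by the one-hot labels `y` picks out the log-probability of the true class. A minimal standalone sketch of that final expression (the array values are illustrative, and `scipy.special.logsumexp` stands in for whichever logsumexp the example imports):

import numpy as np
from scipy.special import logsumexp

z = np.array([[2.0, 0.5, -1.0]])   # unnormalized scores for one sample
y = np.array([[1.0, 0.0, 0.0]])    # one-hot label
log_probs = z - logsumexp(z, axis=1, keepdims=True)           # log-softmax
loss = -np.sum(np.sum(log_probs * y, axis=1)) / z.shape[0]    # mean cross-entropy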
Example #2
def doPDE(values, movablePts, xPoints, yPoints, xIntPoints, yIntPoints):
    # Update the values based on diffusion of the proteins to nearby cells
    D = 0.1  # diffusion parameter
    valuesT = np.transpose(values)
    adjustmentPDEX = D * nonLinearAdjustment(xPoints)
    adjustmentPDEY = D * nonLinearAdjustment(yPoints)

    #simple diffusion is just a convolution
    convolveLinear = np.array([1 * D, -2 * D, 1 * D])
    # accumulate the changes due to diffusion
    for rep in range(50):
        # print(rep)
        newValuesX = list([])
        newValuesY = list([])
        for i in range(HowManyCells):
            row = values[i] + sig.convolve(
                values[i], convolveLinear)[1:-1]  #take off first and last
            rowY = valuesT[i] + sig.convolve(
                valuesT[i], convolveLinear)[1:-1]  #take off first and last
            # non-linear diffusion, add the adjustment
            if i in xIntPoints:
                row = row + np.multiply(row, adjustmentPDEX)
            if i in yIntPoints:
                rowY = rowY + np.multiply(rowY, adjustmentPDEY)
            newValuesX.append(row)
            newValuesY.append(rowY)

        #Merge rows and transposed columns
        values = np.array(newValuesX) + np.array(newValuesY).T
        # add source at each iteration
        values = values + addSources3(xPoints, yPoints)
        #Update transposed values
        valuesT = values.T
    # return the updated values after diffusion (with sources added at each iteration)
    return values
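The 1-D kernel `[D, -2D, D]` is a discrete Laplacian, so each inner iteration above is an explicit Euler step of the heat equation. A minimal sketch of one such step, assuming `scipy.signal` imported as `sig` (the profile `v` is illustrative); slicing `[1:-1]` off the 'full' convolution gives an output the same length as the input:

import numpy as np
from scipy import signal as sig

D = 0.1
v = np.array([0.0, 0.0, 1.0, 0.0, 0.0])        # concentration profile with a point source
convolveLinear = np.array([1 * D, -2 * D, 1 * D])
delta = sig.convolve(v, convolveLinear)[1:-1]  # take off first and last
v = v + delta                                  # one explicit diffusion step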
Example #3
def initial_diffusion(mvble_pts, vesselPts, img, vesselImage):
    D = 0.1
    # D = 1
    B = D / 4

    curDiffuse = 0.0
    prevDiffuse = -1.0
    while True:
        if round(curDiffuse, 6) == round(prevDiffuse, 6):
            print('    curDiffuse: ', curDiffuse)
            print('    prevDiffuse: ', prevDiffuse)
            break
        prevDiffuse = curDiffuse
        # convolve = np.array([[1*D, 1*D, 1*D],[1*D,-8*D,1*D], [1*D, 1*D, 1*D]])
        convolve = np.array([[(1 - np.sqrt(2)) * D, 1 * D,
                              (1 - np.sqrt(2)) * D],
                             [1 * D, (-8 + (4 * np.sqrt(2))) * D, 1 * D],
                             [(1 - np.sqrt(2)) * D, 1 * D,
                              (1 - np.sqrt(2)) * D]])
        deltaDiffusion = sig.convolve(np.array(img),
                                      convolve)[1:-1,
                                                1:-1]  #take off first and last
        # the update to the img from one step of diffusion
        img = np.array(
            np.array(img) + np.array(deltaDiffusion) + vesselImage +
            np.array(nonlinearDiffusion(mvble_pts, img, D, convolve)))
        # print('HERS')
        # print(np.array(img))
        # os.sys.exit()
        img = img - (B * img)
        img = np.clip(img, 0, 1e9)
        curDiffuse = np.sum(img)

    return np.array(img)
Example #4
def diffusion(mvble_pts, img):
    # D is the diffusion constant
    # D = .225
    # B = D / 10
    D = 0.03
    B = D / 4
    # D = 0.00000001
    # B = D / 4

    #https://programtalk.com/python-examples/autograd.scipy.signal.convolve/
    for _ in range(0, 60):  # how many times you run a diffusion update
        convolve = np.array([[1 * D, 1 * D, 1 * D], [1 * D, -8 * D, 1 * D],
                             [1 * D, 1 * D, 1 * D]])
        deltaDiffusion = sig.convolve(np.array(img),
                                      convolve)[1:-1,
                                                1:-1]  #take off first and last
        # deltaDiffusion = deltaDiffusion + np.array(img)

        # the update to the img from one step of diffusion
        img = np.array(
            np.array(img) + np.array(deltaDiffusion) +
            np.array(nonlinearDiffusion(mvble_pts, img)))
        img = img - (B * img)

    np_img = np.array(img)
    mn, mx = np_img.min(), np_img.max()
    np_img = (np_img - mn) / (mx - mn)
    return np_img
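The 3×3 kernel above is the 8-neighbour discrete Laplacian scaled by D, and trimming `[1:-1, 1:-1]` from the 'full' convolution restores the image shape. A minimal 2-D sketch of one update, assuming `scipy.signal` imported as `sig` (the image is illustrative):

import numpy as np
from scipy import signal as sig

D = 0.03
img = np.zeros((5, 5))
img[2, 2] = 1.0                                # point source
kernel = D * np.array([[1, 1, 1],
                       [1, -8, 1],
                       [1, 1, 1]])
delta = sig.convolve(img, kernel)[1:-1, 1:-1]  # same shape as img
img = img + delta                              # one diffusion update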
Example #5
def step_PDE(values, params, nonLinear=False, movablePts=[]):
    # Update the values based on diffusion of the proteins to nearby cells
    (sigmaNu, sigmaXm, muNu, muXm, k_out, k_in, k_p, k_i, k_l, Dn, Dx) = params
    diffusion = np.array([Dn, Dx])  #get the diffusion parameters
    pde_Delta = np.zeros((HowManyCells, VarCount))
    values = values.T  # by protein rather than cell
    newDiff = []
    for i in range(0, VarCount):  # for each protein
        D = diffusion[i]  #get the diffusion parameter
        if nonLinear:  # precompute the adjustments needed for the moveable points
            adjustmentPDE = D * nonLinearAdjustment(movablePts)
            #print(adjustmentPDE)
        #simple diffusion is just a convolution
        convolveLinear = np.array([1 * D, -2 * D, 1 * D])
        oneDif = np.zeros(HowManyCells)
        oldValues = values[i]
        # accumulate the changes due to diffusion
        for rep in range(0, 50):
            #linear diffusion
            oldValues = oldValues + sig.convolve(
                oldValues, convolveLinear)[1:-1]  #take off first and last
            # non-linear diffusion, add the adjustment
            if nonLinear:  #only if moving the vessels
                oldValues = oldValues + np.multiply(oldValues, adjustmentPDE)
        # the total update returned is the difference between the original values and the values after diffusion
        newDiff.append(oldValues - values[i])

    newDiff = np.array(newDiff)
    pde_Delta = pde_Delta + newDiff.T  #switch diff by cell order not protein order
    return newDiff.T
Example #6
def convolvesame(m, n):
    size = len(m)
    con = signal.convolve(m, n, mode='full')  # full convolution; the 'same'-sized centre is cropped below
    pad = int((len(con) - size) / 2)

    same = con[pad:len(con) - pad, pad:len(con) - pad]
    return same
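For square inputs and an odd-sized kernel, cropping the centre of the 'full' result as above reproduces what `mode='same'` would return. A small check sketch under those assumptions (the arrays are illustrative):

import numpy as np
from scipy import signal

a = np.ones((5, 5))
k = np.ones((3, 3))
full = signal.convolve(a, k, mode='full')
pad = (full.shape[0] - a.shape[0]) // 2
same = full[pad:full.shape[0] - pad, pad:full.shape[0] - pad]
assert np.allclose(same, signal.convolve(a, k, mode='same'))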
Example #7
 def _feed(self, X, weights):
     c = convolve(X,
                  weights[0],
                  mode='valid',
                  axes=([1, 2], [2, 3]),
                  dot_axes=([3], [1]))
     n = batch_normalization(c, weights[3], weights[4])
     r = relu(n)
     m = maxout(r)
     c = convolve(m,
                  weights[1],
                  mode='valid',
                  axes=([2, 3], [2, 3]),
                  dot_axes=([1], [1]))
     n = batch_normalization(c, weights[5], weights[6])
     r = relu(n)
     m = maxout(r)
     m = np.reshape(m, (X.shape[0], -1))
     z = np.dot(m, weights[2])
     return softmax(z)
Example #8
def regularization_term(interpol_image, image):
    """ The regularization term of the error.

        This term is responsible for smoothing the image according to
        the pixel differences present in the rgb image.
    """
    kernel = np.full((51, 51), -1)
    kernel[24, 24] = 2600

    b, g, r = np.split(image, 3, axis=2)

    b_diff = convolve(kernel, np.squeeze(b))
    g_diff = convolve(kernel, np.squeeze(g))
    r_diff = convolve(kernel, np.squeeze(r))

    image_diff = np.stack((b_diff, g_diff, r_diff), axis=-1)
    image_diff = np.linalg.norm(image_diff, axis=2)
    interpol_diff = convolve(interpol_image, kernel)
    error = (interpol_diff / image_diff) ** 2
    return -error
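The 51×51 kernel sums to zero (2600 off-centre entries of −1 plus 2600 at the centre), so it responds to local intensity differences rather than overall brightness. A quick sketch confirming this:

import numpy as np

kernel = np.full((51, 51), -1)
kernel[24, 24] = 2600
print(kernel.sum())   # 0, so the kernel is a pure difference operator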
Example #9
 def forward_pass(self, inputs, param_vector):
     # Input dimensions:  [data, 1, y, x]
     # Params dimensions: [1, filter, y, x]
     # Output dimensions: [data, filter, y, x]
     params = self.parser.get(param_vector, 'params')
     biases = self.parser.get(param_vector, 'biases')
     conv = convolve(inputs,
                     params,
                     axes=([2, 3], [2, 3]),
                     dot_axes=([1], [0]),
                     mode='valid')
     return conv + biases
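As the dimension comments indicate, `axes` names the spatial axes to convolve over and `dot_axes` contracts the channel axes, so an entire batch is convolved with an entire filter bank in one call. A minimal sketch with assumed shapes, calling autograd's convolve directly (the sizes are illustrative):

import autograd.numpy as np
from autograd.scipy.signal import convolve

inputs = np.random.randn(8, 1, 28, 28)   # [data, channel, y, x]
params = np.random.randn(1, 6, 5, 5)     # [channel, filter, y, x]
conv = convolve(inputs, params,
                axes=([2, 3], [2, 3]),   # convolve over the spatial axes
                dot_axes=([1], [0]),     # sum over the channel axis
                mode='valid')
print(conv.shape)                        # expected [data, filter, y, x] -> (8, 6, 24, 24)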
Example #10
        def J_bin(eps):
            """ Gives a number between 0 and 1 incidating how close each eps_r
                pixel is to being binarized, used to multiply with J()
            """
            # material density in design region
            rho = (eps - 1) / (self.eps_m - 1) * self.design_region

            # number of cells in design region
            N = npa.sum(self.design_region)

            # gray level map
            M_nd = 4 * rho * (1 - rho)

            (Nx, Ny) = eps.shape

            width = 1
            N_conv = min(Nx, Ny)
            k = np.zeros((N_conv))
            k[N_conv//2:N_conv//2 + width] = 1/width

            # NOTE: the two lines below override the width-based kernel defined above
            N_conv = 2
            k = np.zeros((N_conv))
            k[1] = 1

            penalty = 0.0
            for i in range(Nx):
                strip_i = M_nd[i,:]
                output_i = convolve(k, strip_i)
                # penalty = penalty + 4*npa.sum(output_i*(1-output_i))
                penalty = penalty + npa.sum(npa.abs(output_i))

            for j in range(Ny):
                strip_j = M_nd[:,j]
                output_j = convolve(k, strip_j.T)
                # penalty = penalty + 4*npa.sum(output_j*(1-output_j))
                penalty = penalty + npa.sum(npa.abs(output_j))

            # print(penalty, N)
            penalty = penalty / N / 2
            return 1 - penalty
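The gray-level map `M_nd = 4 * rho * (1 - rho)` is 0 where a pixel is fully binarized (rho equal to 0 or 1) and 1 where it is maximally gray (rho = 0.5), so accumulating it over the design region measures how far `eps` is from binary. A tiny numeric check:

import numpy as np

rho = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
print(4 * rho * (1 - rho))   # [0.   0.75 1.   0.75 0.  ]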
Example #11
def diffusion(mvble_pts, vesselPts, img, vesselImage):
    # D is the diffusion constant
    # D = .225
    # B = D / 10
    # print(np.array(img))
    # img = img[1:-1, 1:-1]
    # os.sys.exit()
    D = 0.1
    # D = 1
    B = D / 4
    # D = 0.00000000
    # B = D / 4

    #https://programtalk.com/python-examples/autograd.scipy.signal.convolve/
    for _ in range(0, 60):  # how many times you run a diffusion update
        # convolve = np.array([[1*D, 1*D, 1*D],[1*D,-8*D,1*D], [1*D, 1*D, 1*D]])
        convolve = np.array([[(1 - np.sqrt(2)) * D, 1 * D,
                              (1 - np.sqrt(2)) * D],
                             [1 * D, (-8 + (4 * np.sqrt(2))) * D, 1 * D],
                             [(1 - np.sqrt(2)) * D, 1 * D,
                              (1 - np.sqrt(2)) * D]])

        deltaDiffusion = sig.convolve(np.array(img),
                                      convolve)[1:-1,
                                                1:-1]  #take off first and last
        # deltaDiffusion = deltaDiffusion + np.array(img)
        # the update to the img from one step of diffusion
        img = np.array(
            np.array(img) + np.array(deltaDiffusion) + vesselImage +
            np.array(nonlinearDiffusion(mvble_pts, img, D, convolve)))
        img = img - (B * img)
        img = np.clip(img, 0, 1e9)
    #     print(type(img))
    #     img_pic = np.pad(img, ((2, 3), (2, 3)), 'constant')
    #     plt.imsave('diffusePngs/TestDiffuse_'+str(i)+'.png', np.rot90(np.array(list(img_pic))), cmap='jet')

    # path_to_img_dir = 'diffusePngs/'
    # images = []
    # for file_name in natsorted(os.listdir(path_to_img_dir), key=lambda y: y.lower()):
    #     if file_name.endswith('.png'):
    #         file_path = os.path.join(path_to_img_dir, file_name)
    #         images.append(imageio.imread(file_path))
    # imageio.mimsave('VascDiffuse.gif', images, fps=5)

    # newimg = np.array(minmax_scale(np.array(img._value)))
    # np_img = np.array(img)
    # mn, mx = np_img.min(), np_img.max()
    # np_img = (np_img - mn) / (mx - mn)
    # print(np.array(img).max(), np.array(img).min())
    print('Diffusion Nut Count: ', np.sum(img))
    return np.array(img)
Example #12
def forward(model, inputs=None, hps=None):
    conv_act = hps['c1_activation'](np.add(
        signal.convolve(inputs, model['input']['c1']['weights']),
        model['input']['c1']['bias'])).reshape(
            inputs.shape[0], -1)  # <-- flatten final activations

    dense1_act = hps['d1_activation'](np.add(
        np.matmul(conv_act, model['c1']['d1']['weights']),
        model['c1']['d1']['bias']))

    output_act = hps['output_activation'](np.add(
        np.matmul(dense1_act, model['d1']['output']['weights']),
        model['d1']['output']['bias']))
    return output_act
Example #13
 def forward(self, weights, inputs):
     # Input dims are [num_weight_sets,
     w, b = self.unpack_params(weights)
     convs = []
     for i in range(len(w)):
         conv = convolve(inputs[i, :],
                         w[i, :],
                         axes=([2, 3], [2, 3]),
                         dot_axes=([1], [0]),
                         mode='valid')
         conv = conv + b[i, :]
         convs.append(self.nonlinearity(conv))
     z = agnp.array(convs)
     return z
Example #14
def convolve(A,
             B,
             axes=None,
             dot_axes=[(), ()],
             mode='full',
             accelerated=torch_accelerated):
    args_are_implemented = check_implemented(axes, dot_axes, mode)
    if accelerated and args_are_implemented:
        return _torch_convolve(A, B, axes=axes, dot_axes=dot_axes, mode=mode)
    else:
        return _autograd_signal.convolve(A,
                                         B,
                                         axes=axes,
                                         dot_axes=dot_axes,
                                         mode=mode)
Example #15
def doPDE(values, movablePts=[HowManyCells / 2 + 0.1]):
    # Update the values based on diffusion of the proteins to nearby cells
    values = values.T  # by protein rather than cell
    D = 0.01  # get the diffusion parameter
    adjustmentPDE = D * nonLinearAdjustment(movablePts)
    print(adjustmentPDE)
    #simple diffusion is just a convolution
    convolveLinear = np.array([1 * D, -2 * D, 1 * D])
    oldValues = values
    # accumulate the changes due to diffusion
    for rep in range(0, 1):
        #linear diffusion
        oldValues = oldValues + sig.convolve(
            oldValues, convolveLinear)[1:-1]  #take off first and last
        # non-linear diffusion, add the adjustment
        oldValues = oldValues + np.multiply(oldValues, adjustmentPDE)
    # return the updated values after diffusion
    return oldValues
Example #16
def doPDE(values, movablePts):
    #movablePts = list(movablePts)
    # Update the values based on diffusion of the proteins to nearby cells
    D = 0.1  #get the diffusion parameter
    adjustmentPDE = D * nonLinearAdjustment(movablePts)
    #print(nonLinearAdjustment(movablePts))
    #simple diffusion is just a convolution
    convolveLinear = np.array([1 * D, -2 * D, 1 * D])
    # accumulate the changes due to diffusion
    for rep in range(0, 50):
        #linear diffusion
        values = values + sig.convolve(
            values, convolveLinear)[1:-1]  #take off first and last
        # non-linear diffusion, add the adjustment
        values = values + np.multiply(values, adjustmentPDE)
        # add source at each iteration
        values = values + addSources(movablePts)
    # return the updated values after diffusion (with sources added at each iteration)
    return values
Example #17
def step_PDE(values, params, vas_structure, shape_of_img, nonLinear=False, movablePts=[]):
    # Update the values based on diffusion of the proteins to nearby cells
    (sigmaNu, sigmaXm, muNu, muXm, k_out, k_in, k_p, k_i, k_l, Dn, Dx) = params
    diffusion = np.array([Dn, Dx])  #get the diffusion parameters
    # pde_Delta = np.zeros((HowManyCells, VarCount))
    pde_Delta = np.zeros(shape_of_img + (VarCount,), dtype=np.float64)
    values = values.T  # by protein rather than cell
    newDiff = []
    for i in range(0, VarCount):  # for each protein
        D = diffusion[i]  #get the diffusion parameter
        if nonLinear:  # precompute the adjustments needed for the moveable points
            # start = time.time()
            adjustmentPDE = D * nonLinearAdjustment(movablePts, shape_of_img)
            # end = time.time()
            # print('Nonlinear Adjust = ', end-start, 'seconds')
            #print(adjustmentPDE)
        #simple diffusion is just a convolution
        # convolveLinear = np.array([1*D,-2*D,1*D])
        convolveLinear = D * np.array([[1, 1, 1],
                                       [1, -8, 1],
                                       [1, 1, 1]])
        # convolveLinear = np.array([[(1-np.sqrt(2))*D, 1*D,(1-np.sqrt(2))*D],
        #                      [1*D,(-8+(4*np.sqrt(2)))*D,1*D],
        #                      [(1-np.sqrt(2))*D, 1*D, (1-np.sqrt(2))*D]])

        oldValues = values[i]
        # accumulate the changes due to diffusion
        for rep in range(0, 50):
            #linear diffusion
            oldValues = oldValues + sig.convolve(
                oldValues, convolveLinear)[1:-1, 1:-1]  #take off first and last
            # non-linear diffusion, add the adjustment
            if nonLinear:  #only if moving the vessels
                oldValues = oldValues + np.multiply(oldValues, adjustmentPDE)
        # the total update returned is the difference between the original values and the values after diffusion
        newDiff.append(oldValues - values[i])

    newDiff = np.array(newDiff)
    pde_Delta = pde_Delta + newDiff.T  #switch diff by cell order not protein order
    return newDiff.T
Example #18
#w_true = np.abs(np.random.rand(Ndata))+1;

#true grid needs to be set up with noise
w_true_grid = np.zeros((n_grid, n_grid))
for x, y, w in zip(x_true, y_true, w_true):
    w_true_grid[np.argmin(np.abs(theta_grid - x)), np.argmin(np.abs(theta_grid - y))] = w
#zero pad true_grid and psf
'''
wtg = np.zeros((9,9));
wtg[2:7,2:7] = w_true_grid;
p = np.zeros((9,9));
p[2:7,2:7] = psf;
'''
data = np.real(fft.ifft2(fft.fft2(w_true_grid) * fft.fft2(psf))) + sig_noise * np.random.randn(n_grid, n_grid)
data2 = Psi(w_true_grid) + sig_noise * np.random.randn(n_grid, n_grid)
data3 = signal.convolve(w_true_grid, psf)
diff = int((len(data3[:, 0]) - n_grid) / 2)
data3 = data3[diff:n_grid + diff, diff:n_grid + diff]
#data4 = np.real(fft.ifft2(fft.fft2(wtg)*fft.fft2(p)))
data4 = np.real(fft.ifft2(fft.ifft2(w_true_grid))) * n_grid**2
fig, ax = plt.subplots(1, 2)
ax[0].imshow(w_true_grid)
ax[0].set_title('True Positions')
#ax[1].imshow(data3[:-4,:-4]);
ax[1].imshow(data4)
ax[1].set_title('Observed Data')
plt.show()

#create fft of psf
psf_k = fft.fft2(psf)
Example #19
 def forward_pass(self, data, params):
     filters = params.get(self.get_params_shape())
     return convolve(data, filters, axes=([2, 3], [2, 3]), dot_axes = ([1], [0]), mode='valid')
Example #20
def lnlike_k(ws):
    ''' log likelihood w/ periodic boundary conditions (need for solving w/
    respect to fourier coefficients)
    '''
    return 0.5 * np.sum(
        (aSignal.convolve(ws, _psf) - data_p)**2) / sig_noise**2
Example #21
def multislice_propagate_cnn(grid_delta,
                             grid_beta,
                             probe_real,
                             probe_imag,
                             energy_ev,
                             psize_cm,
                             kernel_size=17,
                             free_prop_cm=None,
                             debug=False):

    assert kernel_size % 2 == 1, 'kernel_size must be an odd number.'
    n_batch, shape_y, shape_x, n_slice = grid_delta.shape
    lmbda_nm = 1240. / energy_ev
    voxel_nm = np.array(psize_cm) * 1.e7
    delta_nm = voxel_nm[-1]
    k = 2. * np.pi * delta_nm / lmbda_nm
    grid_shape = np.array(grid_delta.shape[1:])
    size_nm = voxel_nm * grid_shape
    mean_voxel_nm = np.prod(voxel_nm)**(1. / 3)

    # print('Critical distance is {} cm.'.format(psize_cm[0] * psize_cm[1] * grid_delta.shape[1] / (lmbda_nm * 1e-7)))

    if kernel_size % 2 == 0:
        warnings.warn('Kernel size should be odd.')
    # kernel = get_kernel(delta_nm, lmbda_nm, voxel_nm, np.array(grid_delta.shape[1:]))
    kernel = get_kernel(delta_nm, lmbda_nm, voxel_nm, grid_shape - 1)
    kernel = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel)))
    # dxchange.write_tiff(np.abs(kernel), 'test/kernel_abs', dtype='float32')
    # dxchange.write_tiff(np.angle(kernel), 'test/kernel_phase', dtype='float32')
    # raise Exception

    kernel_mid = ((np.array(kernel.shape) - 1) / 2).astype('int')
    half_kernel_size = int((kernel_size - 1) / 2)
    kernel = kernel[kernel_mid[0] - half_kernel_size:kernel_mid[0] +
                    half_kernel_size + 1, kernel_mid[1] -
                    half_kernel_size:kernel_mid[1] + half_kernel_size + 1]
    # kernel = get_kernel_ir_real(delta_nm, lmbda_nm, voxel_nm, [kernel_size, kernel_size, 256])
    # kernel /= kernel.size
    pad_len = (kernel_size - 1) // 2

    # probe_real = np.pad(probe_real, [[pad_len, pad_len], [pad_len, pad_len]], mode='constant', constant_values=1.0)
    # probe_imag = np.pad(probe_real, [[pad_len, pad_len], [pad_len, pad_len]], mode='constant', constant_values=0)
    probe = probe_real + 1j * probe_imag
    probe_size = probe.shape
    probe = np.tile(probe, [n_batch, 1, 1])

    # grid_delta = np.pad(grid_delta, [[0, 0], [pad_len, pad_len], [pad_len, pad_len], [0, 0]], mode='constant', constant_values=0)
    # grid_beta = np.pad(grid_beta, [[0, 0], [pad_len, pad_len], [pad_len, pad_len], [0, 0]], mode='constant', constant_values=0)

    probe_array = []

    # Build cyclic convolution matrix for kernel
    # kernel_mat = np.zeros([np.prod(probe_size)] * 2)
    # kernel_full_00 = np.zeros(probe_size)
    # kernel_full_00[:kernel_size, :kernel_size] = kernel
    # kernel_full_00 = np.roll(kernel_full_00, -half_kernel_size, axis=0)
    # kernel_full_00 = np.roll(kernel_full_00, -half_kernel_size, axis=1)
    # kernel_mat[0, :] = kernel_full_00.flatten()
    # for i in trange(probe_size[0]):
    #     for j in range(probe_size[1]):
    #         if i != 0 or j != 0:
    #             kernel_temp = np.roll(kernel_full_00, i, axis=0)
    #             kernel_temp = np.roll(kernel_temp, j, axis=1)
    #             kernel_mat[i * probe_size[1] + j, :] = kernel_temp.flatten()

    t0 = time.time()

    edge_val = 1.0

    initial_int = probe[0, 0, 0]
    for i_slice in trange(n_slice):
        this_delta_batch = grid_delta[:, :, :, i_slice]
        this_beta_batch = grid_beta[:, :, :, i_slice]
        # this_delta_batch = np.squeeze(this_delta_batch)
        # this_beta_batch = np.squeeze(this_beta_batch)
        c = np.exp(1j * k * this_delta_batch - k * this_beta_batch)
        probe = probe * c
        # print(probe.shape, kernel.shape)
        # probe = scipy.signal.convolve2d(np.squeeze(probe), kernel, mode='same', boundary='wrap', fillvalue=1)
        # probe = np.reshape(probe, [1, probe.shape[0], probe.shape[1]])

        probe = np.pad(probe, [[0, 0], [pad_len, pad_len], [pad_len, pad_len]],
                       mode='constant',
                       constant_values=edge_val)
        # probe = np.pad(probe, [[0, 0], [pad_len, pad_len], [pad_len, pad_len]], mode='wrap')
        probe = convolve(probe, kernel, mode='valid', axes=([1, 2], [0, 1]))

        # probe = np.reshape(probe, [n_batch, np.prod(probe_size)])
        # probe = probe.dot(kernel_mat.T)
        # probe = np.reshape(probe, [n_batch, *probe_size])

        edge_val = sum(kernel.flatten() * edge_val)
        # print(probe.shape)
        # probe = ifft2(np_ifftshift(np_fftshift(fft2(probe)) * np_fftshift(fft2(kernel))))
        # probe = ifft2(np_ifftshift(np_fftshift(fft2(probe)) * kernel))

        # re-normalize to 1
        # probe *= 1. / np.mean(np.abs(probe))

        probe_array.append(np.abs(probe))

    final_int = probe[0, 0, 0]
    probe *= (initial_int / final_int)

    if free_prop_cm is not None:
        #1dxchange.write_tiff(abs(wavefront), '2d_1024/monitor_output/wv', dtype='float32', overwrite=True)
        if free_prop_cm == 'inf':
            probe = np.fft.fftshift(np.fft.fft2(probe), axes=[1, 2])
        else:
            dist_nm = free_prop_cm * 1e7
            l = np.prod(size_nm)**(1. / 3)
            crit_samp = lmbda_nm * dist_nm / l
            algorithm = 'TF' if mean_voxel_nm > crit_samp else 'IR'
            # print(algorithm)
            algorithm = 'TF'
            if algorithm == 'TF':
                h = get_kernel(dist_nm, lmbda_nm, voxel_nm, grid_shape)
                probe = np.fft.ifft2(
                    np.fft.ifftshift(
                        np.fft.fftshift(np.fft.fft2(probe), axes=[1, 2]) * h,
                        axes=[1, 2]))
            else:
                h = get_kernel_ir(dist_nm, lmbda_nm, voxel_nm, grid_shape)
                probe = np.fft.ifft2(
                    np.fft.ifftshift(
                        np.fft.fftshift(np.fft.fft2(probe), axes=[1, 2]) * h,
                        axes=[1, 2]))

    if debug:
        return probe, probe_array, time.time() - t0
    else:
        return probe
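In the slice loop, the probe is padded by `pad_len` on each side and then convolved with `mode='valid'`, which removes `kernel_size - 1` samples per axis, so the probe keeps its shape from slice to slice. A small shape check under those assumptions (illustrative sizes, plain 2-D scipy convolution standing in for the batched call above):

import numpy as np
from scipy import signal

kernel_size = 17
pad_len = (kernel_size - 1) // 2
probe = np.ones((64, 64), dtype=complex)
kernel = np.ones((kernel_size, kernel_size), dtype=complex)

padded = np.pad(probe, pad_len, mode='constant', constant_values=1.0)
out = signal.convolve(padded, kernel, mode='valid')
print(out.shape)   # (64, 64) -- same as the unpadded probe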