Example 1
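These examples share a common module-level context. A minimal sketch of the assumed imports (helper routines such as powerIter, powerIterBlurTomo, qGGMRFfuncs, nesterovOGM2update, stoppingCritVol, forwardProject, backProject, LipschitzForward, computeTalwarWeight, computeGenHuberWeight, and computeGenHuberCost are assumed to be provided by the same package and are not shown on this page):

import gc
import time

import numpy as np
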
def mlopTomoPoisson(proj_data, weight_data, A, rec_params):
    """Function for Max. likelihood based on the opTomo function and a quadratic approximation to the log-likelihood term. This uses less GPU memory but moves large arrays to and from GPU (sub-optimal) 
    Inputs: proj_data : A num_rows X num_angles X num_columns array 
            weight_data : A num_rows X num_angles X num_columns array containting the noise variance values 
            A : Spot operator based forward projection matrix 
            rec_params: Dictionary of parameters associated with the reconstruction algorithm 
    Output : recon : A num_rows X num_cols X num_cols array  
    """
    MIN_ITER = 5
    det_row = proj_data.shape[0]
    num_views = proj_data.shape[1]
    det_col = proj_data.shape[2]

    vol_z = rec_params['n_vox_z']
    vol_x = rec_params['n_vox_x']
    vol_y = rec_params['n_vox_y']

    vol_size = vol_z * vol_y * vol_x
    proj_size = det_row * det_col * num_views

    #Array to save recon
    if 'x_init' in rec_params.keys():
        x_recon = rec_params['x_init'].reshape(vol_size)
        z_recon = rec_params['x_init'].reshape(vol_size)
    else:
        x_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        z_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))

    #Flatten projection data to a vector
    proj_data = proj_data.reshape(proj_size).astype(np.float32)
    weight_data = weight_data.reshape(proj_size).astype(np.float32)

    #Compute Lipschitz of gradient

    #temp_backproj=LipschitzForward(vol_size,A,weight_data)
    #L = temp_backproj.max()
    #del temp_backproj

    eig_val, L = powerIter(vol_size, A, weight_data, 50)
    del eig_val

    if (rec_params['verbose']):
        print('Lipschitz constant = %f' % (L))

    #Initialize variables for Nesterov method
    #ASSUME both x and z are set to zero
    t_nes = 1
    t = time.time()
    x_prev = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    gradient = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    cost = np.zeros(rec_params['num_iter'])

    #ML loop x_k+1 = x_k + func(gradient)
    for iter_num in range(rec_params['num_iter']):

        if (rec_params['verbose']):
            print('Iter number %d of %d in %f sec' %
                  (iter_num, rec_params['num_iter'], time.time() - t))

        error = (A * x_recon) - proj_data
        gradient = A.T * (weight_data * error)

        #Cost compute for Debugging
        if rec_params['debug']:
            cost[iter_num] = 0.5 * (error * weight_data * error).sum()
            print('Forward Cost %f' % (cost[iter_num]))
            if (iter_num > 0 and (cost[iter_num] - cost[iter_num - 1]) > 0):
                print('Cost went up!')
                t_nes = 1  #reset momentum; adaptive re-start

        x_prev = np.copy(x_recon)
        x_recon, z_recon, t_nes = nesterovOGM2update(x_recon, z_recon, t_nes,
                                                     gradient, L)

        if iter_num > MIN_ITER and stoppingCritVol(x_recon, x_prev,
                                                   rec_params['stop_thresh'],
                                                   rec_params['roi_mask']):
            break

    elapsed_time = time.time() - t
    if (rec_params['verbose']):
        print('Time for %d ML iterations = %f' %
              (rec_params['num_iter'], elapsed_time))
    recon = x_recon.reshape(vol_z, vol_y, vol_x)
    return recon, cost
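
powerIter is not shown on this page. A minimal sketch of what the call above assumes it does: estimate the largest eigenvalue of A^T W A (the Lipschitz constant of the weighted-quadratic data term) by power iteration. The name powerIterSketch and the return convention (eigenvector estimate, eigenvalue) are assumptions based on the call site.

def powerIterSketch(vol_size, A, weight_data, num_iter):
    #Power method on A^T diag(weight_data) A
    x = np.random.rand(vol_size).astype(np.float32)
    x /= np.linalg.norm(x)
    L = 0.0
    for _ in range(num_iter):
        y = A.T * (weight_data * (A * x))  #apply A^T W A
        L = np.linalg.norm(y)              #eigenvalue estimate
        x = (y / L).astype(np.float32)     #re-normalize
    return x, L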
Example 2
def mbiropDeblurTomoPoisson(proj_data, weight_data, A, H, rec_params):
    """Function for MBIR based on the opTomo function and a quadratic approximation to the log-likelihood term. This uses less GPU memory but moves large arrays to and from GPU (sub-optimal). Forward model is a tomographic projector followed by a blur kernel  
    Inputs: proj_data : A num_rows X num_angles X num_columns array 
            weight_data : A num_rows X num_angles X num_columns array containting the noise variance values 
            A : Tomographic projector based on ASTRA + spot operator
            H : Blurring kernel of size num_angles X n_x X n_y  
            rec_params : A dictionary with keys for various parameters of any potential reconstruction algorithm
                       'gpu_index' : Index of GPU to be used 
                       'num_iter' : Number of MBIR iterations 
                       'MRF_P' : MRF order parameter
                       'MRF_SIGMA' : regulariztion parameter/scale parameter for MRF
    Output : recon : A num_rows X num_y X num_x array 
    """
    MIN_ITER = 5
    det_row = proj_data.shape[0]
    num_views = proj_data.shape[1]
    det_col = proj_data.shape[2]

    proj_shape = [det_row, num_views, det_col]

    vol_z = rec_params['n_vox_z']
    vol_x = rec_params['n_vox_x']
    vol_y = rec_params['n_vox_y']

    #Prior model initializations
    mrf_cost, grad_prior, hessian_prior = qGGMRFfuncs()

    vol_size = vol_z * vol_y * vol_x
    proj_size = det_row * det_col * num_views

    #Array to save recon
    if 'x_init' in rec_params.keys():
        x_recon = rec_params['x_init'].reshape(vol_size)
        z_recon = rec_params['x_init'].reshape(vol_size)
    else:
        x_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        z_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))

    #Flatten projection data to a vector
    proj_data = proj_data.reshape(proj_size).astype(np.float32)
    weight_data = weight_data.reshape(proj_size).astype(np.float32)

    #Compute an upper bound on the Lipschitz constant of the gradient
    temp_backproj = np.zeros(vol_size, dtype=np.float32)
    #temp_backproj = LipschitzForwardBlurTomo(vol_size, proj_shape, A, H, weight_data)
    x_ones = np.ones(vol_size, dtype=np.float32)
    hessian_prior(x_ones, temp_backproj, vol_z, vol_y, vol_x,
                  rec_params['MRF_SIGMA'])
    L = temp_backproj.max()

    eig_val, L_f = powerIterBlurTomo(vol_size, proj_shape, A, H, weight_data,
                                     50)
    del eig_val
    L += L_f

    if (rec_params['verbose']):
        print('Lipschitz constant = %f' % (L))
    del x_ones, temp_backproj

    #Initialize variables for Nesterov method
    #ASSUME both x and z are set to zero
    t_nes = 1
    t = time.time()
    gradient = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    temp_cost = np.zeros(1, dtype=np.float32)
    cost = np.zeros(rec_params['num_iter'])
    #MBIR loop x_k+1 = x_k + func(gradient)
    for iter_num in range(rec_params['num_iter']):
        if (rec_params['verbose']):
            print('Iter number %d of %d in %f sec' %
                  (iter_num, rec_params['num_iter'], time.time() - t))
        error = forwardProject(A, H, x_recon, proj_shape) - proj_data
        gradient = backProject(A, H, weight_data * error, proj_shape)
        #Cost compute for Debugging
        if rec_params['debug']:
            temp_cost_forward = 0.5 * (error * weight_data * error).sum()
            mrf_cost(x_recon, temp_cost, vol_z, vol_y, vol_x,
                     rec_params['MRF_P'], rec_params['MRF_SIGMA'])
            cost[iter_num] = temp_cost_forward + temp_cost[0]
            print('Forward Cost %f, Prior Cost %f' %
                  (temp_cost_forward, temp_cost[0]))
            temp_cost[:] = 0
            if (iter_num > 0 and (cost[iter_num] - cost[iter_num - 1]) > 0):
                print('Cost went up!')
                t_nes = 1  #reset momentum; adaptive re-start
        grad_prior(x_recon, gradient, vol_z, vol_y, vol_x, rec_params['MRF_P'],
                   rec_params['MRF_SIGMA'])  #accumulates gradient from prior
        x_prev = np.copy(x_recon)
        x_recon, z_recon, t_nes = nesterovOGM2update(x_recon, z_recon, t_nes,
                                                     gradient, L)
        if iter_num > MIN_ITER and stoppingCritVol(x_recon, x_prev,
                                                   rec_params['stop_thresh'],
                                                   rec_params['roi_mask']):
            break
        #The call to the C-code grad_prior seems to cause memory to grow; this is a basic fix. TODO: Better memory fix
        gc.collect()

    elapsed_time = time.time() - t
    if (rec_params['verbose']):
        print('Time for %d iterations = %f' %
              (rec_params['num_iter'], elapsed_time))
    recon = x_recon.reshape(vol_z, vol_y, vol_x)
    return recon, cost
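
forwardProject and backProject are defined elsewhere in the package. A minimal sketch of the composition the calls above imply (tomographic projection followed by a per-view blur, and its adjoint), assuming H[v] holds the 2-D DFT of the view-v blur kernel at the detector size:

def forwardProjectSketch(A, H, x, proj_shape):
    #Project, then blur each view in Fourier space
    proj = np.asarray(A * x).reshape(proj_shape)
    out = np.empty(proj_shape, dtype=np.float32)
    for v in range(proj_shape[1]):
        out[:, v, :] = np.real(np.fft.ifft2(np.fft.fft2(proj[:, v, :]) * H[v]))
    return out.reshape(-1)

def backProjectSketch(A, H, y, proj_shape):
    #Adjoint: conjugate blur per view, then back-project
    res = np.asarray(y).reshape(proj_shape)
    out = np.empty(proj_shape, dtype=np.float32)
    for v in range(proj_shape[1]):
        out[:, v, :] = np.real(np.fft.ifft2(np.fft.fft2(res[:, v, :]) * np.conj(H[v])))
    return A.T * out.reshape(-1)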
Example 3
def mlCudaDebluropTomo(proj_data, weight_data, A, H, rec_params):
    """Function for GPU based ML estimate based on the Deblur+Project forward. (TODO: Debug)
    Inputs: proj_data : A num_rows X num_angles X num_columns array 
            A         : Forward projection matrix 
            H         : FFT of blur kernel for each view 
            rec_params: Dictionary of parameters associated with the reconstruction algorithm 
    Output : recon : A num_rows X num_cols X num_cols array  
    """
    MIN_ITER = 5
    det_row = proj_data.shape[0]
    num_views = proj_data.shape[1]
    det_col = proj_data.shape[2]

    proj_shape = [det_row, num_views, det_col]

    vol_z = rec_params['n_vox_z']
    vol_x = rec_params['n_vox_x']
    vol_y = rec_params['n_vox_y']

    vol_size = vol_z * vol_y * vol_x
    proj_size = det_row * det_col * num_views

    #Array to save recon
    if 'x_init' in rec_params.keys():
        x_recon = rec_params['x_init'].reshape(vol_size)
        z_recon = rec_params['x_init'].reshape(vol_size)
    else:
        x_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        z_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))

    #Flatten projection data to a vector
    proj_data = proj_data.reshape(proj_size).astype(np.float32)
    weight_data = weight_data.reshape(proj_size).astype(np.float32)

    #Compute Lipschitz of gradient
    #temp_backproj=LipschitzForwardBlurTomo(vol_size,proj_shape,A,H,weight_data)
    #L = temp_backproj.max()
    #del temp_backproj

    eig_val, L = powerIterBlurTomo(vol_size, proj_shape, A, H, weight_data, 50)
    del eig_val

    if (rec_params['verbose']):
        print('Lipschitz constant = %f' % (L))

    #Initialize variables for Nesterov method
    #ASSUME both x and z are set to zero
    t_nes = 1
    t = time.time()
    gradient = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    cost = np.zeros(rec_params['num_iter'])
    #ML loop x_k+1 = x_k + func(gradient)
    for iter_num in range(rec_params['num_iter']):
        if (rec_params['verbose']):
            print('Iter number %d of %d in %f sec' %
                  (iter_num, rec_params['num_iter'], time.time() - t))
        error = forwardProject(A, H, x_recon, proj_shape) - proj_data
        gradient = backProject(A, H, weight_data * error, proj_shape)
        #Cost compute for Debugging
        if rec_params['debug']:
            cost[iter_num] = 0.5 * (error * weight_data * error).sum()
            print('Forward Cost %f' % (cost[iter_num]))
            if (iter_num > 0 and (cost[iter_num] - cost[iter_num - 1]) > 0):
                print('Cost went up!')
        x_prev = np.copy(x_recon)
        #x_recon = gradDescentupdate(x_recon, gradient, 1.0/rec_params['step_size'])
        x_recon, z_recon, t_nes = nesterovOGM2update(x_recon, z_recon, t_nes,
                                                     gradient, L)
        if iter_num > MIN_ITER and stoppingCritVol(x_recon, x_prev,
                                                   rec_params['stop_thresh'],
                                                   rec_params['roi_mask']):
            break

    elapsed_time = time.time() - t
    if (rec_params['verbose']):
        print('Time for %d iterations = %f' %
              (rec_params['num_iter'], elapsed_time))
    recon = x_recon.reshape(vol_z, vol_y, vol_x)
    return recon, cost
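
nesterovOGM2update is likewise external to this page. A minimal FISTA-style sketch consistent with the (x, z, t) state threaded through these loops; the actual OGM2 variant (an optimized gradient method) may use slightly different momentum coefficients:

def nesterovUpdateSketch(x, z, t, gradient, L):
    z_new = x - gradient / L                           #gradient step with step size 1/L
    t_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t * t))   #momentum sequence update
    x_new = z_new + ((t - 1.0) / t_new) * (z_new - z)  #extrapolation/momentum step
    return x_new, z_new, t_new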
Example 4
def mbiropTomoTalwar(proj_data, weight_data, A, rec_params):
    """Function for MBIR based on the opTomo function and a Talwar function for log-likelihood term. 
    Inputs: proj_data : A num_rows X num_angles X num_columns array 
            weight_data : A num_rows X num_angles X num_columns array containting the noise variance values 
            A : Spot operator based forward projection matrix 
            rec_params : A dictionary with keys for various parameters of any potential reconstruction algorithm
                       'gpu_index' : Index of GPU to be used 
                       'num_iter' : Number of MBIR iterations 
                       'reg_param' : regulariztion parameter/scale parameter for MRF
                       'reject_frac'   : Threshold for generalized Huber function for likelihood  
    Output : recon : A num_rows X num_columns X num_columns array 
    """
    REJECT_STEP = 5  #Step size for progressive rejection of outliers (0-100)
    NUM_INNER_ITER = 50  #Number of iterations to run with a fixed rejection threshold
    MIN_ITER = NUM_INNER_ITER * REJECT_STEP + 10  #Min iter after which to terminate algorithm
    PROGRESSIVE_UPDATE = True
    sigma = 1

    det_row = proj_data.shape[0]
    num_views = proj_data.shape[1]
    det_col = proj_data.shape[2]

    vol_z = rec_params['n_vox_z']
    vol_x = rec_params['n_vox_x']
    vol_y = rec_params['n_vox_y']

    vol_size = vol_z * vol_y * vol_x
    proj_size = det_row * det_col * num_views

    reject_frac = rec_params['reject_frac']

    #Prior model initializations
    mrf_cost, grad_prior, hessian_prior = qGGMRFfuncs()

    #Flatten projection data to a vector
    proj_data = proj_data.reshape(proj_size).astype(np.float32)
    weight_data = weight_data.reshape(proj_size).astype(np.float32)

    #Array to save recon
    #TODO: The logic here has to be fixed. This assumes that if there is an initial input we are in the multi-resolution mode of operation
    if 'x_init' in rec_params.keys():
        x_recon = rec_params['x_init'].reshape(vol_size)
        z_recon = rec_params['x_init'].reshape(vol_size)
        error = (A * x_recon) - proj_data
        if (rec_params['verbose']):
            print('Target rejection fraction = %f percent' % reject_frac)
        huber_T = np.percentile(np.fabs(error) * np.sqrt(weight_data),
                                100 - reject_frac,
                                method='nearest')
        #TODO: HACK for multi-resolution. At the coarsest resolution, use the quadratic model
        if x_recon.max() == 0:
            huber_T = 1e5  #Effectively infinite
        weight_new = np.ascontiguousarray(np.zeros(proj_size,
                                                   dtype=np.float32))
        weight_new = computeGenHuberWeight(error, sigma, huber_T, 0,
                                           weight_data, weight_new)
        PROGRESSIVE_UPDATE = False
        if (rec_params['verbose']):
            print('Initializing volume..')
    else:
        x_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        z_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        weight_new = np.ascontiguousarray(np.zeros(proj_size,
                                                   dtype=np.float32))

    if (rec_params['verbose']):
        print('Starting %d MBIR iterations ..' % (rec_params['num_iter']))

    #Compute Lipschitz of gradient
    x_ones = np.ones(vol_size, dtype=np.float32)
    A_ones = A * x_ones
    temp_backproj = A.T * (weight_data * A_ones)  #At*W*A
    L_data = temp_backproj.max() / (sigma**2)
    temp_backproj *= 0
    hessian_prior(x_ones, temp_backproj, vol_z, vol_y, vol_x,
                  rec_params['MRF_SIGMA'])
    L_prior = temp_backproj.max()
    L = L_data + L_prior
    if (rec_params['verbose']):
        print('Lipschitz constant = %f' % (L))
    del x_ones, temp_backproj, A_ones

    #Initialize variables for Nesterov method
    #ASSUME both x and z are set to zero
    t_nes = 1
    t = time.time()
    gradient = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    anomaly_classifier = np.zeros(proj_size, dtype=np.uint8)
    temp_cost = np.zeros(1, dtype=np.float32)
    cost = np.zeros(rec_params['num_iter'])
    momentum = np.zeros(rec_params['num_iter'])
    #MBIR loop x_k+1 = x_k + func(gradient)
    error = (A * x_recon) - proj_data
    for iter_num in range(rec_params['num_iter']):
        if (rec_params['verbose']):
            print('Iter number %d of %d in %f sec' %
                  (iter_num, rec_params['num_iter'], time.time() - t))
        if (iter_num % NUM_INNER_ITER == 0 and PROGRESSIVE_UPDATE
                and iter_num <= NUM_INNER_ITER * REJECT_STEP):
            t_nes = 1  #Reset momentum
            curr_rej_frac = min((float(iter_num) / NUM_INNER_ITER) *
                                reject_frac / REJECT_STEP, reject_frac)
            if (rec_params['verbose']):
                print('Current rejection fraction = %f' % curr_rej_frac)
            huber_T = np.percentile(np.fabs(error) * np.sqrt(weight_data),
                                    100 - curr_rej_frac,
                                    method='nearest')
            if (iter_num == 0):  #First time select all measurements
                huber_T = 5e10

        weight_new = computeTalwarWeight(error, sigma, huber_T, 0, weight_data,
                                         weight_new)  #Compute weight matrix

        #Cost compute for Debugging
        if rec_params['debug']:
            momentum[iter_num] = t_nes
            temp_cost_forward = computeGenHuberCost(error, weight_new, sigma,
                                                    huber_T, 0)
            mrf_cost(x_recon, temp_cost, vol_z, vol_y, vol_x,
                     rec_params['MRF_P'], rec_params['MRF_SIGMA'])
            cost[iter_num] = temp_cost_forward + temp_cost[0]
            print('Forward Cost %f, Prior Cost %f' %
                  (temp_cost_forward, temp_cost[0]))
            temp_cost[:] = 0
            if (iter_num > 0 and (cost[iter_num] - cost[iter_num - 1]) > 0):
                print('Cost went up!')

        #Update the volume
        error = (A * x_recon) - proj_data
        gradient = (A.T * (weight_new * error)) / sigma**2
        grad_prior(x_recon, gradient, vol_z, vol_y, vol_x, rec_params['MRF_P'],
                   rec_params['MRF_SIGMA'])  #accumulates gradient from prior

        x_prev = np.copy(x_recon)
        #Take a step to decrease cost function value w.r.t volume
        x_recon, z_recon, t_nes = nesterovOGM2update(x_recon, z_recon, t_nes,
                                                     gradient, L)
        if iter_num > MIN_ITER and stoppingCritVol(x_recon, x_prev,
                                                   rec_params['stop_thresh'],
                                                   rec_params['roi_mask']):
            print('Number of iterations to convergence %d' % iter_num)
            break
        #The call to the C-code grad_prior seems to cause memory to grow; this is a basic fix. TODO: Better memory fix
        gc.collect()

    elapsed_time = time.time() - t
    if (rec_params['verbose']):
        print('Time for %d iterations = %f' %
              (rec_params['num_iter'], elapsed_time))

    weight_mask = np.where((np.fabs(error) * np.sqrt(weight_data)) > huber_T)
    anomaly_classifier[weight_mask] = 1
    anomaly_classifier = anomaly_classifier.reshape(det_row, num_views,
                                                    det_col)

    recon = x_recon.reshape(vol_z, vol_y, vol_x)
    if rec_params['debug']:
        from matplotlib import pyplot as plt
        plt.plot(momentum)
        plt.xlabel('Iter')
        plt.ylabel('Momentum')
        plt.show()

    return recon, cost, anomaly_classifier
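
computeTalwarWeight is assumed to implement the Talwar (hard-rejection) influence function: measurements whose normalized residual exceeds the threshold receive zero weight, which is consistent with the anomaly mask computed at the end of the function. A minimal sketch; treating the fourth argument (passed as 0 above) as the weight factor for rejected entries is an assumption:

def computeTalwarWeightSketch(error, sigma, T, reject_weight, weight_data, weight_new):
    r = np.fabs(error) * np.sqrt(weight_data) / sigma  #normalized residual
    weight_new[:] = np.where(r <= T, weight_data, reject_weight * weight_data)
    return weight_new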
Example 5
def mbiropTomoPoisson(proj_data, weight_data, A, rec_params):
    """Function for MBIR based on the opTomo function and a quadratic approximation to the log-likelihood term. This uses less GPU memory but moves large arrays to and from GPU (sub-optimal) 
    Inputs: proj_data : A num_rows X num_angles X num_columns array 
            weight_data : A num_rows X num_angles X num_columns array containting the noise variance values 
            A : Spot operator based forward projection matrix 
            rec_params: Dictionary of parameters associated with the reconstruction algorithm 
    Output : recon : A num_rows X num_cols X num_cols array  
    """
    MIN_ITER = 5
    det_row = proj_data.shape[0]
    num_views = proj_data.shape[1]
    det_col = proj_data.shape[2]

    vol_z = rec_params['n_vox_z']
    vol_x = rec_params['n_vox_x']
    vol_y = rec_params['n_vox_y']

    #Prior model initializations
    mrf_cost, grad_prior, hessian_prior = qGGMRFfuncs()

    #Function to compute quadratic majorizer for non-linear conjugate gradient inner loop
    #ncg_params = ncg_qGGMRF_funcs()

    vol_size = vol_z * vol_y * vol_x
    proj_size = det_row * det_col * num_views

    #Array to save recon
    if 'x_init' in rec_params.keys():
        x_recon = rec_params['x_init'].reshape(vol_size)
        z_recon = rec_params['x_init'].reshape(vol_size)
    else:
        x_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        z_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))

    #Flatten projection data to a vector
    proj_data = proj_data.reshape(proj_size).astype(np.float32)
    weight_data = weight_data.reshape(proj_size).astype(np.float32)

    #Compute Lipschitz of gradient
    #temp_backproj=np.zeros(vol_size).astype(np.float32)
    temp_backproj = LipschitzForward(vol_size, A, weight_data)
    x_ones = np.ones(vol_size, dtype=np.float32)
    hessian_prior(x_ones, temp_backproj, vol_z, vol_y, vol_x,
                  rec_params['MRF_SIGMA'])
    L = temp_backproj.max()

    #eig_vec,L_f=powerIter(vol_size,A,weight_data,50)
    #del eig_vec
    #L= L + L_f

    #Diagonal majorizer
    #D= 1.0/temp_backproj
    #D[np.isnan(D)] = 1.0/L
    #D[np.isinf(D)] = 1.0/L
    #print('Min, mean, max of diagonal step size (%f,%f,%f)' %(D.min(),D.mean(),D.max()))
    #D=D*0 + 1.0/L

    if (rec_params['verbose']):
        print('Lipschitz constant = %f' % (L))
    del x_ones, temp_backproj

    #Initialize variables for Nesterov method
    #ASSUME both x and z are set to zero
    t_nes = 1
    t = time.time()
    x_prev = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    gradient = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    temp_cost = np.zeros(1, dtype=np.float32)
    cost = np.zeros(rec_params['num_iter'])

    #gradient_prev = np.ascontiguousarray(np.zeros(vol_size,dtype=np.float32))
    #cg_dir = np.ascontiguousarray(np.zeros(vol_size,dtype=np.float32))

    #MBIR loop x_k+1 = x_k + func(gradient)
    for iter_num in range(rec_params['num_iter']):

        if (rec_params['verbose']):
            print('Iter number %d of %d in %f sec' %
                  (iter_num, rec_params['num_iter'], time.time() - t))

        #gradient_prev = np.copy(gradient)  #(for SD/NCG)
        error = (A * x_recon) - proj_data
        gradient = A.T * (weight_data * error)

        #Cost compute for Debugging
        if rec_params['debug']:
            temp_cost_forward = 0.5 * (error * weight_data * error).sum()
            mrf_cost(x_recon, temp_cost, vol_z, vol_y, vol_x,
                     rec_params['MRF_P'], rec_params['MRF_SIGMA'])
            cost[iter_num] = temp_cost_forward + temp_cost[0]
            print('Forward Cost %f, Prior Cost %f' %
                  (temp_cost_forward, temp_cost[0]))
            temp_cost[:] = 0
            if (iter_num > 0 and (cost[iter_num] - cost[iter_num - 1]) > 0):
                print('Cost went up!')
                t_nes = 1  #reset momentum; adaptive re-start

        grad_prior(x_recon, gradient, vol_z, vol_y, vol_x, rec_params['MRF_P'],
                   rec_params['MRF_SIGMA'])  #accumulates gradient from prior

        x_prev = np.copy(x_recon)
        x_recon, z_recon, t_nes = nesterovOGM2update(x_recon, z_recon, t_nes,
                                                     gradient, L)

        #if(iter_num ==0): #NCG
        #gradient_prev = np.copy(gradient)  #(for SD/NCG)
        #cg_dir = np.copy(gradient)
        #NCG
        #x_recon,cg_dir = ncgQMupdate(x_recon,-gradient,-gradient_prev,cg_dir,2,A,weight_data,error,ncg_params,vol_z,vol_x,vol_y,rec_params['MRF_P'],rec_params['MRF_SIGMA'])

        if iter_num > MIN_ITER and stoppingCritVol(x_recon, x_prev,
                                                   rec_params['stop_thresh'],
                                                   rec_params['roi_mask']):
            break
        #The call to the C-code grad_prior seems to cause memory to grow; this is a basic fix. TODO: Better memory fix
        gc.collect()

    elapsed_time = time.time() - t
    if (rec_params['verbose']):
        print('Time for %d iterations = %f' %
              (rec_params['num_iter'], elapsed_time))
    recon = x_recon.reshape(vol_z, vol_y, vol_x)
    return recon, cost
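
stoppingCritVol is the shared early-termination test. A minimal sketch, assuming it compares the mean absolute voxel change inside the region-of-interest mask against rec_params['stop_thresh'] (the exact metric is an assumption):

def stoppingCritVolSketch(x_recon, x_prev, stop_thresh, roi_mask):
    mask = np.asarray(roi_mask).reshape(-1) > 0
    change = np.abs(x_recon[mask] - x_prev[mask]).mean()
    return change < stop_thresh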
Example 6
def mbiropTomo(proj_data, A, rec_params):
    """Function for MBIR based on the opTomo function. This uses less GPU memory but moves large arrays to and from GPU (sub-optimal) 
    Inputs: proj_data : A num_rows X num_angles X num_columns array 
            A : Spot operator based forward projection matrix 
            rec_params: Dictionary of parameters associated with the reconstruction algorithm 
    Output : recon : A num_rows X num_cols X num_cols array  
    """

    DEFAULT_STOP_THRESH = 1
    MIN_ITER = 5

    det_row = proj_data.shape[0]
    num_views = proj_data.shape[1]
    det_col = proj_data.shape[2]

    vol_z = rec_params['n_vox_z']
    vol_x = rec_params['n_vox_x']
    vol_y = rec_params['n_vox_y']

    #Prior model initializations
    mrf_cost, grad_prior, hessian_prior = qGGMRFfuncs()

    vol_size = vol_z * vol_y * vol_x
    proj_size = det_row * det_col * num_views

    #Array to save recon
    if 'x_init' in rec_params.keys():
        x_recon = rec_params['x_init'].reshape(vol_size)
        z_recon = rec_params['x_init'].reshape(vol_size)
    else:
        x_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
        z_recon = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))

    #Flatten projection data to a vector
    proj_data = proj_data.reshape(proj_size).astype(np.float32)

    if (rec_params['verbose']):
        print('Starting %d MBIR iterations ..' % (rec_params['num_iter']))

    #Compute Lipschitz of gradient
    temp_backproj = LipschitzForward(vol_size, A,
                                     np.ones(proj_size, dtype=np.float32))
    x_ones = np.ones(vol_size, dtype=np.float32)
    hessian_prior(x_ones, temp_backproj, vol_z, vol_y, vol_x,
                  rec_params['MRF_SIGMA'])
    L = temp_backproj.max()

    if (rec_params['verbose']):
        print('Lipschitz constant = %f' % (L))

    del x_ones, temp_backproj

    #Initialize variables for Nesterov method
    #ASSUME both x and z are set to zero
    t_nes = 1
    t = time.time()
    gradient = np.ascontiguousarray(np.zeros(vol_size, dtype=np.float32))
    temp_cost = np.zeros(1, dtype=np.float32)
    cost = np.zeros(rec_params['num_iter'])
    #MBIR loop x_k+1 = x_k + func(gradient)
    for iter_num in range(rec_params['num_iter']):
        if (rec_params['verbose']):
            print('Iter number %d of %d in %f sec' %
                  (iter_num, rec_params['num_iter'], time.time() - t))
        error = (A * x_recon) - proj_data
        gradient = A.T * error
        #Cost compute for Debugging
        if rec_params['debug']:
            temp_cost_forward = 0.5 * (error * error).sum()
            mrf_cost(x_recon, temp_cost, vol_z, vol_y, vol_x,
                     rec_params['MRF_P'], rec_params['MRF_SIGMA'])
            cost[iter_num] = temp_cost_forward + temp_cost[0]
            print('Forward Cost %f, Prior Cost %f' %
                  (temp_cost_forward, temp_cost[0]))
            temp_cost[:] = 0
            if (iter_num > 0 and (cost[iter_num] - cost[iter_num - 1]) > 0):
                print('Cost went up!')
                t_nes = 1  #reset momentum
        grad_prior(x_recon, gradient, vol_z, vol_y, vol_x, rec_params['MRF_P'],
                   rec_params['MRF_SIGMA'])  #accumulates gradient from prior
        x_prev = np.copy(x_recon)
        x_recon, z_recon, t_nes = nesterovOGM2update(x_recon, z_recon, t_nes,
                                                     gradient, L)
        if iter_num > MIN_ITER and stoppingCritVol(x_recon, x_prev,
                                                   rec_params['stop_thresh'],
                                                   rec_params['roi_mask']):
            break
        #The call to the C-code grad_prior seems to cause memory to grow; this is a basic fix. TODO: Better memory fix
        gc.collect()

    elapsed_time = time.time() - t
    if (rec_params['verbose']):
        print('Time for %d iterations = %f' %
              (rec_params['num_iter'], elapsed_time))

    recon = x_recon.reshape(vol_z, vol_y, vol_x)
    return recon, cost
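
An end-to-end sketch of how one of these routines might be driven with an ASTRA OpTomo operator. The geometry sizes, parameter values, and roi_mask semantics below are illustrative assumptions, not values from the source:

import astra  #assumes the ASTRA toolbox with CUDA support

n_z, n_y, n_x = 64, 256, 256
det_row, det_col, num_views = 64, 256, 180
angles = np.linspace(0, np.pi, num_views, False)

vol_geom = astra.create_vol_geom(n_y, n_x, n_z)
proj_geom = astra.create_proj_geom('parallel3d', 1.0, 1.0, det_row, det_col,
                                   angles)
proj_id = astra.create_projector('cuda3d', proj_geom, vol_geom)
A = astra.OpTomo(proj_id)  #behaves like a scipy LinearOperator (A * x, A.T * y)

proj_data = np.zeros((det_row, num_views, det_col), dtype=np.float32)  #measured data goes here
weight_data = np.ones((det_row, num_views, det_col), dtype=np.float32)  #per-measurement weights

rec_params = {
    'n_vox_z': n_z, 'n_vox_y': n_y, 'n_vox_x': n_x,
    'num_iter': 200, 'stop_thresh': 1e-3,
    'roi_mask': np.ones((n_z, n_y, n_x), dtype=bool),
    'MRF_P': 1.2, 'MRF_SIGMA': 0.5,
    'verbose': True, 'debug': False,
}
recon, cost = mbiropTomoPoisson(proj_data, weight_data, A, rec_params)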