Example #1
def QuasiNewtonAlgorithm(X0, func, func_grad, func_hessian, options):

    epsilon = 10e-6
    reset_dir_every_n_iter = X0.size;
    report = {};
    N_iter_max = options['N_iter_max'];
    tolerance_x = options['tolerance_x'];
    tolerance_y = options['tolerance_y'];
    bpp = options['bpp'];
    progress_x = np.zeros((N_iter_max + 1, X0.size));
    progress_y = np.zeros((N_iter_max + 1, 1));
    progress_x[0] = X0.ravel();
    progress_y[0] = func(X0);
    X_old = X0;
    h_old = np.eye(X0.size, X0.size); # approximation of inv(hessian)
    
    for iter_no in range(1, N_iter_max + 1):
        grad_old = func_grad(X_old);
        
        if (np.linalg.norm(grad_old) < epsilon):
            print('norm(grad) < epsilon in %d iterations, exit..' % (iter_no));
            break;    
            
        if (iter_no == 1 or iter_no % (reset_dir_every_n_iter) == 0):
            d = -grad_old; # reset the search direction to steepest descent
        else:
            d = -h_old @ grad_old; # quasi-Newton search direction from the inverse-Hessian approximation
        
        alpha = -(grad_old.T @ d) / (d.T @ func_hessian(X_old) @ d); # step size; this formula is exact only for quadratic functions
        X = X_old + alpha * d;        
        
        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp);
        
        progress_x[iter_no] = X.ravel();
        progress_y[iter_no] = func(X);
        
        if (AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y) == True):
            break;
                    
        grad = func_grad(X);
        delta_X = X - X_old;
        delta_grad = grad - grad_old;
        delta_h = bfgs(h_old, delta_X, delta_grad);
        h = h_old + delta_h;      
        X_old = X;
        h_old = h;
        
    report = {'N_iter_max' : N_iter_max, 'iter_no' : iter_no, 'X0' : X0, 'X' : X, 'progress_x' : progress_x, 'progress_y' : progress_y};
    return (X, report);
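
The bfgs helper used above returns the increment delta_h that is added to the current inverse-Hessian approximation, but it is not shown in this listing. A minimal sketch of the standard BFGS inverse-Hessian update such a helper might implement, assuming delta_X and delta_grad are n-by-1 column vectors (the name bfgs_update_increment is mine):

import numpy as np

def bfgs_update_increment(h_old, delta_x, delta_grad):
    # Standard BFGS update of the inverse-Hessian approximation, returned as the
    # increment delta_h so that h_new = h_old + delta_h (matching the call site above).
    rho = 1.0 / (delta_grad.T @ delta_x).item()     # requires positive curvature: delta_grad.T @ delta_x > 0
    I = np.eye(delta_x.size)
    h_new = (I - rho * (delta_x @ delta_grad.T)) @ h_old @ (I - rho * (delta_grad @ delta_x.T)) \
            + rho * (delta_x @ delta_x.T)
    return h_new - h_old
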
Example #2
def Clsfilter(kernel_degradation, image_degraded, bpp, reg_coef, domain):
    # Constrained least squares filter
    # Performs image restoration in the image or the frequency domain
    # kernel_degradation - convolution kernel (linear degradation model)
    # image_degraded - degraded 2D image
    # bpp - bits per pixel
    # reg_coef - regularization coefficient
    # domain - 'image' or 'fft'
    
    N1c, N2c = image_degraded.shape;
    y = image_degraded.reshape((N1c * N2c, 1), order='C');
    hw, _ = kernel_degradation.shape;
    N1, N2 = N1c + 1 - hw, N2c + 1 - hw;
    h = np.zeros((N1c, N2c));
    h[0:hw, 0:hw] = kernel_degradation;

    # high pass filter for noise suppression
    kernel_laplacian = np.array([[0.00, 0.25, 0.00],
                                 [0.25, -1.00, 0.25],
                                 [0.00, 0.25, 0.00]]);
    cw, _ = kernel_laplacian.shape;
    c = np.zeros((N1c, N2c));
    c[0:cw, 0:cw] = kernel_laplacian;
    
    if domain == 'image':        
        Hbc = _MakeBlockCirculantMatrix(h);
        Cbc = _MakeBlockCirculantMatrix(c); 
        
        x_r = np.linalg.solve(Hbc.T @ Hbc + reg_coef * Cbc.T @ Cbc, Hbc.T @ y);
        x_r = x_r.reshape(N1c, N2c);
        x_r = np.roll(x_r, axis = 1, shift = -hw + 1);
        x_r = np.real(x_r[0:N1, 0:N2]);
        x_r = CapIntensity(x_r, bpp);
        
        image_recovered = x_r;    
    
    elif domain == 'fft':
        H_fft = np.fft.fft2(h);
        C_fft = np.fft.fft2(c);
        Y_fft = np.fft.fft2(image_degraded);
        
        X_r_fft = np.conj(H_fft) * Y_fft / (np.abs(H_fft) ** 2 + reg_coef * np.abs(C_fft) ** 2);
        x_r = np.fft.ifft2(X_r_fft);        
        x_r = np.real(x_r[0:N1, 0:N2]);
        x_r = CapIntensity(x_r, bpp);
        
        image_recovered = x_r;    
        
    return image_recovered;
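
A self-contained sketch of the 'fft' branch above on synthetic data (the test image, the 3x3 blur kernel, and the use of np.clip in place of the CapIntensity helper are my assumptions):

import numpy as np

rng = np.random.default_rng(0)
image_degraded = rng.uniform(0, 255, size=(64, 64))    # stand-in for a real degraded image
kernel = np.ones((3, 3)) / 9.0                          # assumed 3x3 blur kernel
reg_coef = 0.01

h = np.zeros(image_degraded.shape); h[:3, :3] = kernel  # zero-padded degradation kernel
c = np.zeros(image_degraded.shape)                      # zero-padded Laplacian (regularization) kernel
c[:3, :3] = np.array([[0.00, 0.25, 0.00],
                      [0.25, -1.00, 0.25],
                      [0.00, 0.25, 0.00]])

H_fft, C_fft, Y_fft = np.fft.fft2(h), np.fft.fft2(c), np.fft.fft2(image_degraded)
X_r_fft = np.conj(H_fft) * Y_fft / (np.abs(H_fft) ** 2 + reg_coef * np.abs(C_fft) ** 2)
image_recovered = np.clip(np.real(np.fft.ifft2(X_r_fft)), 0, 255)   # crude cap instead of CapIntensity

With the repository helpers available, the equivalent call would be Clsfilter(kernel, image_degraded, 8, 0.01, 'fft').
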
Example #3
def WienerFilterK(kernel_degradation, image_degraded, image_original, K, bpp):
    # Wiener filter
    # Performs image restoration in the frequency domain
    # kernel_degradation - convolution kernel (linear degradation model)
    # image_degraded - degraded 2D image
    # image_original - original 2D image
    # K - constant, the ratio of the power spectrum of noise to the power spectrum of the nondegraded image
    # bpp - bits per pixel
    
    N1c, N2c = image_degraded.shape;
    hw, _ = kernel_degradation.shape;
    N1, N2 = N1c + 1 - hw, N2c + 1 - hw;
    y = image_degraded;
    h = np.zeros((N1c, N2c));
    h[0:hw, 0:hw] = kernel_degradation;     
    
    Y_fft = np.fft.fft2(y);
    H_fft = np.fft.fft2(h);   
    
    X_r_fft = np.conj(H_fft) * Y_fft / (np.abs(H_fft) ** 2 + K);
    x_r = np.fft.ifft2(X_r_fft);        
    x_r = np.real(x_r[0:N1, 0:N2]);
    x_r = CapIntensity(x_r, bpp);
    
    image_recovered = x_r;     
    
    return image_recovered;
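
Note that image_original is accepted but not used in this variant; K stands in for the noise-to-signal power ratio. A minimal sketch of one way K could be estimated when a noise sample and the original image are available (this estimator is my assumption, not part of the repository):

import numpy as np

def estimate_k(image_noise, image_original):
    # Scalar stand-in for |N(u,v)|^2 / |F(u,v)|^2: ratio of the average noise
    # power spectrum to the average power spectrum of the original image.
    noise_power = np.mean(np.abs(np.fft.fft2(image_noise)) ** 2)
    signal_power = np.mean(np.abs(np.fft.fft2(image_original)) ** 2)
    return noise_power / signal_power
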
Example #4
def WienerFilter(kernel_degradation, image_degraded, image_original, image_noise, bpp):
    # Wiener filter
    # Performs image restoration in the frequency domain
    # kernel_degradation - convolution kernel (linear degradation model)
    # image_degraded - degraded 2D image
    # image_original - original 2D image
    # image_noise - noise 2D image
    # bpp - bits per pixel
    
    N1c, N2c = image_degraded.shape;
    hw, _ = kernel_degradation.shape;
    N1, N2 = N1c + 1 - hw, N2c + 1 - hw;
    y = image_degraded;
    h = np.zeros((N1c, N2c));
    h[0:hw, 0:hw] = kernel_degradation;    
    n = image_noise;    
    f = np.zeros((N1c, N2c));
    f[0:N1, 0:N2] = image_original;    
    
    Y_fft = np.fft.fft2(y);
    H_fft = np.fft.fft2(h);
    N_fft = np.fft.fft2(n);
    F_fft = np.fft.fft2(f);
    
    X_r_fft = np.conj(H_fft) * Y_fft / (np.abs(H_fft) ** 2 + np.abs(N_fft) ** 2 / np.abs(F_fft) ** 2);
    x_r = np.fft.ifft2(X_r_fft);        
    x_r = np.real(x_r[0:N1, 0:N2]);
    x_r = CapIntensity(x_r, bpp);
    
    image_recovered = x_r;     
    
    return image_recovered;
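
Here the per-frequency ratio np.abs(N_fft) ** 2 / np.abs(F_fft) ** 2 can blow up at frequencies where F_fft is close to zero. A hedged sketch of the same ratio with a small floor on the denominator (the eps guard is my addition, not part of the repository code):

import numpy as np

def noise_to_signal_ratio(N_fft, F_fft, eps=1e-12):
    # Per-frequency |N|^2 / |F|^2 with a small floor on |F|^2 to avoid
    # division by zero where the original image spectrum vanishes.
    return np.abs(N_fft) ** 2 / np.maximum(np.abs(F_fft) ** 2, eps)
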
Example #5
def ConjGradAlgorithmLinesearch(X0, func, func_grad, options):
  
    epsilon = 10e-6
    reset_dir_every_n_iter = X0.size;
    report = {};
    N_iter_max = options['N_iter_max'];
    tolerance_x = options['tolerance_x'];
    tolerance_y = options['tolerance_y'];
    bpp = options['bpp'];
    progress_x = np.zeros((N_iter_max + 1, X0.size));
    progress_y = np.zeros((N_iter_max + 1, 1));
    progress_x[0] = X0.ravel();
    progress_y[0] = func(X0);
    X_old = X0;
    grad_old = 0;
    
    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old);
        
        if (np.linalg.norm(grad) < epsilon):
            print('norm(grad) < epsilon in %d iterations, exit..' % (iter_no));
            break;    
            
        if (iter_no == 1 or iter_no % (reset_dir_every_n_iter) == 0):
            d = -grad; # reset the search direction to steepest descent
        else:
            beta = fletcher_reeves(grad_old, grad, d); # coefficient for calculating conjugate directional vector
            d = -grad + beta * d; # directional vector
        
        alpha = SecantAlgorithmAlphaLinesearch(X_old, func_grad, d); # step size
        X = X_old + alpha * d;        
        
        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp);
        
        progress_x[iter_no] = X.ravel();
        progress_y[iter_no] = func(X);
        
        if (AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y) == True):
            break;
            
        X_old = X;
        grad_old = grad;
        
    report = {'N_iter_max' : N_iter_max, 'iter_no' : iter_no, 'X0' : X0, 'X' : X, 'progress_x' : progress_x, 'progress_y' : progress_y};
    return (X, report);
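
The fletcher_reeves helper is not shown in this listing. A minimal sketch of the classical Fletcher-Reeves coefficient such a helper might compute (the name fletcher_reeves_beta is mine; d is accepted only to mirror the call site above and is not used in the classical formula):

import numpy as np

def fletcher_reeves_beta(grad_old, grad, d=None):
    # Classical Fletcher-Reeves coefficient: ||g_k||^2 / ||g_{k-1}||^2,
    # with grad and grad_old as n-by-1 column vectors.
    return (grad.T @ grad).item() / (grad_old.T @ grad_old).item()
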
Example #6
def ConjGradAlgorithmManualAlpha(X0, func, func_grad, func_hessian, options):
  
    epsilon = 10e-6;
    report = {};
    N_iter_max = options['N_iter_max'];
    tolerance_x = options['tolerance_x'];
    tolerance_y = options['tolerance_y'];
    bpp = options['bpp'];
    alpha = options['alpha'];
    progress_x = np.zeros((N_iter_max + 1, X0.size));
    progress_y = np.zeros((N_iter_max + 1, 1));
    progress_x[0] = X0.ravel();
    progress_y[0] = func(X0);
    X_old = X0;
    
    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old);
        
        if (np.linalg.norm(grad) < epsilon):
            print('norm(grad) < epsilon in %d iterations, exit..' % (iter_no));
            break;     
            
        if (iter_no == 1):
            d = -grad; # directional vector
        else:
            # coefficient for the conjugate search direction; this formula is exact only for quadratic functions
            beta = (grad.T @ func_hessian(X_old) @ d) / (d.T @ func_hessian(X_old) @ d); 
            d = -grad + beta * d; # directional vector
        
        X = X_old + alpha * d;        
        
        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp);
        
        progress_x[iter_no] = X.ravel();
        progress_y[iter_no] = func(X);
        
        if (AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y) == True):
            break;
            
        X_old = X;
        
    report = {'N_iter_max' : N_iter_max, 'iter_no' : iter_no, 'X0' : X0, 'X' : X, 'progress_x' : progress_x, 'progress_y' : progress_y};
    return (X, report);
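
A small self-contained check (mine, not part of the repository) that the beta formula above makes consecutive directions conjugate with respect to the Hessian Q of a quadratic f(x) = 0.5 x^T Q x - b^T x:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
Q = A @ A.T + 4.0 * np.eye(4)                   # symmetric positive definite Hessian
b = rng.standard_normal((4, 1))

x = np.zeros((4, 1))
grad = Q @ x - b
d = -grad
alpha = -(grad.T @ d) / (d.T @ Q @ d)           # exact step size for a quadratic
x = x + alpha * d

grad = Q @ x - b
beta = (grad.T @ Q @ d) / (d.T @ Q @ d)         # same beta formula as above
d_new = -grad + beta * d
print((d_new.T @ Q @ d).item())                 # ~0, i.e. d_new is Q-conjugate to d
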
Example #7
def NewtonAlgorithmLinesearch(X0, func, func_grad, func_hessian, options):
  
    epsilon = 10e-6
    reg_coeff = 10e-6;
    report = {};
    N_iter_max = options['N_iter_max'];
    tolerance_x = options['tolerance_x'];
    tolerance_y = options['tolerance_y'];
    bpp = options['bpp'];
    progress_x = np.zeros((N_iter_max + 1, X0.size));
    progress_y = np.zeros((N_iter_max + 1, 1));
    progress_x[0] = X0.ravel();
    progress_y[0] = func(X0);
    X_old = X0;
    
    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old); 
        
        if (np.linalg.norm(grad) < epsilon):
            print('norm(grad) < epsilon in %d iterations, exit..' % (iter_no));
            break;                    
            
        d = np.linalg.solve(func_hessian(X_old) + reg_coeff * np.eye(X_old.size, X_old.size), -grad); # directional vector, Levenberg-Marquardt modification
        alpha = SecantAlgorithmAlphaLinesearch(X_old, func_grad, d); # step size
        X = X_old + alpha * d; 
        
        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp);
        
        progress_x[iter_no] = X.ravel();
        progress_y[iter_no] = func(X);
        
        if (AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y) == True):
            break;
            
        X_old = X;
        
    report = {'N_iter_max' : N_iter_max, 'iter_no' : iter_no, 'X0' : X0, 'X' : X, 'progress_x' : progress_x, 'progress_y' : progress_y};
    return (X, report);
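
The SecantAlgorithmAlphaLinesearch helper is referenced in several examples but not shown. A hedged sketch of a secant line search it might implement, driving phi'(alpha) = grad(X + alpha*d)^T d towards zero along the direction d (the defaults and the name secant_alpha are assumptions):

import numpy as np

def secant_alpha(X, func_grad, d, alpha0=0.0, alpha1=1e-3, n_iter=20, tol=1e-10):
    # Secant iteration on the directional derivative phi'(alpha) = grad(X + alpha*d)^T d.
    dphi = lambda a: (func_grad(X + a * d).T @ d).item()
    g0, g1 = dphi(alpha0), dphi(alpha1)
    for _ in range(n_iter):
        if abs(g1 - g0) < tol:
            break                               # denominator too small to continue safely
        alpha0, alpha1, g0 = alpha1, alpha1 - g1 * (alpha1 - alpha0) / (g1 - g0), g1
        g1 = dphi(alpha1)
    return alpha1
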
Example #8
def SpatiallyAdaptiveClsFilter(kernel_degradation, image_degraded, bpp, reg_coef, weights_1, weights_2):
    # Spatially adaptive constrained least squares filter
    # Performs spatially adaptive image restoration in the image domain.
    #   Good for preserving edges: noise is smoothed only in the flat regions, where it is visible;
    #   it is not noticeable at the edges, so the edges are left unsmoothed.
    # kernel_degradation - convolution kernel (linear degradation model)
    # image_degraded - degraded 2D image
    # bpp - bits per pixel
    # reg_coef - regularization coefficient
    # weights_1 - matrix of weights (scaled 0 to 1) for the data fidelity term (same size as image_degraded).
    #   weights_1 should contain high values at the edges and low values in the flat regions
    # weights_2 - matrix of weights (scaled 0 to 1) for the prior knowledge (regularization) term (same size as image_degraded).
    #   weights_2 should contain high values in the flat regions and low values at the edges
    
    W1 = np.diag(weights_1.flatten(order='C'));
    W2 = np.diag(weights_2.flatten(order='C'));
    
    N1c, N2c = image_degraded.shape;
    y = image_degraded.reshape((N1c * N2c, 1), order='C');
    hw, _ = kernel_degradation.shape;
    N1, N2 = N1c + 1 - hw, N2c + 1 - hw;
    h = np.zeros((N1c, N2c));
    h[0:hw, 0:hw] = kernel_degradation;

    # high pass filter for noise suppression
    kernel_laplacian = np.array([[0.00, 0.25, 0.00],
                                 [0.25, -1.00, 0.25],
                                 [0.00, 0.25, 0.00]]);
    cw, _ = kernel_laplacian.shape;
    c = np.zeros((N1c, N2c));
    c[0:cw, 0:cw] = kernel_laplacian;  
 
    Hbc = _MakeBlockCirculantMatrix(h);
    Cbc = _MakeBlockCirculantMatrix(c);
 
    HbctW1 = Hbc.T @ W1;
    x_r = np.linalg.solve(HbctW1 @ Hbc + reg_coef * Cbc.T @ W2 @ Cbc, HbctW1 @ y);
    x_r = x_r.reshape(N1c, N2c);
    x_r = np.roll(x_r, axis = 1, shift = -hw + 1);
    x_r = np.real(x_r[0:N1, 0:N2]);
    x_r = CapIntensity(x_r, bpp);
    
    image_recovered = x_r;     
    
    return image_recovered;
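
The comments above ask for weights_1 that are high at edges and weights_2 that are high in flat regions. A hypothetical way to derive both from a normalized gradient-magnitude edge map (the function make_adaptive_weights and the use of np.gradient are my assumptions):

import numpy as np

def make_adaptive_weights(image):
    # Build weights_1 (high at edges) and weights_2 (high in flat regions),
    # both scaled to [0, 1] and the same size as the input image.
    gy, gx = np.gradient(image.astype(float))
    edges = np.hypot(gx, gy)
    edges = edges / (edges.max() + 1e-12)        # normalized edge map in [0, 1]
    weights_1 = edges                            # data-fidelity weights: strong at edges
    weights_2 = 1.0 - edges                      # regularization weights: strong in flat areas
    return weights_1, weights_2

These two matrices could then be passed as the last two arguments of SpatiallyAdaptiveClsFilter.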