Example #1
import numpy as np

def QuasiNewtonAlgorithm(X0, func, func_grad, func_hessian, options):

    epsilon = 1e-5  # gradient-norm stopping threshold
    reset_dir_every_n_iter = X0.size
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    progress_x = np.zeros((N_iter_max + 1, X0.size))
    progress_y = np.zeros((N_iter_max + 1, 1))
    progress_x[0] = X0.ravel()
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0
    h_old = np.eye(X0.size)  # approximation of inv(hessian)

    for iter_no in range(1, N_iter_max + 1):
        grad_old = func_grad(X_old)

        if np.linalg.norm(grad_old) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        if iter_no == 1 or iter_no % reset_dir_every_n_iter == 0:
            d = -grad_old  # reset to the steepest-descent direction
        else:
            d = -h_old @ grad_old  # quasi-Newton direction (inverse-Hessian approximation times gradient)

        # Step size; this closed form is exact only for a quadratic objective.
        alpha = -(grad_old.T @ d) / (d.T @ func_hessian(X_old) @ d)
        X = X_old + alpha * d

        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp)

        progress_x[iter_no] = X.ravel()
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        grad = func_grad(X)
        delta_X = X - X_old
        delta_grad = grad - grad_old
        delta_h = bfgs(h_old, delta_X, delta_grad)
        h = h_old + delta_h
        X_old = X
        h_old = h

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
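The helper bfgs is not included in this listing. A minimal sketch, assuming from the call site above that it returns the additive increment of the standard BFGS inverse-Hessian update (so that h_old + bfgs(...) is the updated approximation):

def bfgs(h_old, delta_X, delta_grad):
    # Sketch (assumed behavior; the original helper is not shown).
    # With s = delta_X, y = delta_grad and rho = 1 / (y^T s), the standard
    # BFGS inverse update is
    #   H_new = (I - rho s y^T) H_old (I - rho y s^T) + rho s s^T,
    # and this function returns H_new - H_old.
    s = delta_X.reshape(-1, 1)
    y = delta_grad.reshape(-1, 1)
    rho = 1.0 / float(y.T @ s)
    I = np.eye(s.size)
    h_new = (I - rho * (s @ y.T)) @ h_old @ (I - rho * (y @ s.T)) + rho * (s @ s.T)
    return h_new - h_old

Note that the update preserves positive-definiteness only when the curvature condition y^T s > 0 holds, which a robust implementation would check.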
Example #2
def ConjGradAlgorithmLinesearchFFT(X0, func, func_grad, options):

    epsilon = 1e-5  # gradient-norm stopping threshold
    reset_dir_every_n_iter = X0.size
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    progress_x = np.zeros((N_iter_max + 1, X0.shape[0], X0.shape[1]), dtype=np.complex128)
    progress_y = np.zeros((N_iter_max + 1, X0.shape[0], X0.shape[1]), dtype=np.complex128)
    progress_x[0] = X0
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0
    grad_old = None  # set at the end of each iteration; the first iteration does not use it

    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old)

        if np.linalg.norm(grad) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        if iter_no == 1 or iter_no % reset_dir_every_n_iter == 0:
            d = -grad  # reset to the steepest-descent direction
        else:
            beta = fletcher_reeves(grad_old, grad, d)  # coefficient for the conjugate direction
            d = -grad + beta * d  # conjugate direction

        alpha = SecantAlgorithmAlphaLinesearchFFT(X_old, func_grad, d)  # step size
        X = X_old + alpha * d

        # Projection onto the box constraints of X
        # (return value unused; CapIntensityFFT is assumed to operate in place)
        CapIntensityFFT(X, bpp)

        progress_x[iter_no] = X
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        X_old = X
        grad_old = grad

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
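fletcher_reeves is likewise not shown. A sketch under the usual Fletcher-Reeves definition, written with the conjugate inner product so it also covers the complex-valued arrays used here (the d argument is accepted only to match the call site; the Fletcher-Reeves formula does not use it):

def fletcher_reeves(grad_old, grad, d):
    # Fletcher-Reeves coefficient: beta = <grad, grad> / <grad_old, grad_old>.
    # np.vdot conjugates its first argument and flattens both, so this works
    # for real vectors and complex 2-D arrays alike. d is unused by this formula.
    return np.real(np.vdot(grad, grad)) / np.real(np.vdot(grad_old, grad_old))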
Example #3
def ConjGradAlgorithmManualAlphaFFT(X0, func, func_grad, func_hessian, options):

    epsilon = 1e-5  # gradient-norm stopping threshold
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    alpha = options['alpha']  # fixed, user-supplied step size
    progress_x = np.zeros((N_iter_max + 1, X0.shape[0], X0.shape[1]), dtype=np.complex128)
    progress_y = np.zeros((N_iter_max + 1, X0.shape[0], X0.shape[1]), dtype=np.complex128)
    progress_x[0] = X0
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0

    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old)

        if np.linalg.norm(grad) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        if iter_no == 1:
            d = -grad  # steepest-descent direction
        else:
            # Conjugate-direction coefficient; this closed form is exact only for
            # a quadratic objective. The products are elementwise, i.e. the Hessian
            # is treated as acting diagonally in the Fourier domain.
            H = func_hessian(X_old)  # evaluated once per iteration
            beta = (np.conj(grad) * H * d) / (np.conj(d) * H * d)
            d = -grad + beta * d  # conjugate direction

        X = X_old + alpha * d

        # Projection onto the box constraints of X
        # (return value unused; CapIntensityFFT is assumed to operate in place)
        CapIntensityFFT(X, bpp)

        progress_x[iter_no] = X
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        X_old = X

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
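For reference, the closed-form coefficients used in this example and the next come from exact conjugacy on a quadratic model f(X) = (1/2) X^H H X - b^H X (stated here for orientation; not part of the original listing):

\beta_k = \frac{g_{k+1}^{H} H d_k}{d_k^{H} H d_k},
\qquad
\alpha_k = -\frac{g_k^{H} d_k}{d_k^{H} H d_k}

In the FFT variant above the products are taken elementwise, which amounts to treating H as diagonal in the Fourier domain; in the next example H is a dense matrix and the same formulas appear with matrix products.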
Example #4
def ConjGradAlgorithmVarAlpha(X0, func, func_grad, func_hessian, options):

    epsilon = 1e-5  # gradient-norm stopping threshold
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    progress_x = np.zeros((N_iter_max + 1, X0.size))
    progress_y = np.zeros((N_iter_max + 1, 1))
    progress_x[0] = X0.ravel()
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0

    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old)

        if np.linalg.norm(grad) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        H = func_hessian(X_old)  # evaluated once per iteration

        if iter_no == 1:
            d = -grad  # steepest-descent direction
        else:
            # Conjugate-direction coefficient; this closed form is exact only
            # for a quadratic objective.
            beta = (grad.T @ H @ d) / (d.T @ H @ d)
            d = -grad + beta * d  # conjugate direction

        # Step size; this closed form is exact only for a quadratic objective.
        alpha = -(grad.T @ d) / (d.T @ H @ d)
        X = X_old + alpha * d

        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp)

        progress_x[iter_no] = X.ravel()
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        X_old = X

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
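A minimal usage sketch on a 2-D convex quadratic, where the closed-form alpha and beta are exact. The values are illustrative, not from the original source, and CapIntensity and AreWeDoneYet (sketched after Example #7) must be in scope; bpp = 8 keeps the box constraint inactive for these iterates:

Q = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])

def quad(X):
    return float(0.5 * X.T @ Q @ X - b.T @ X)

def quad_grad(X):
    return Q @ X - b

def quad_hessian(X):
    return Q

options = {'N_iter_max': 50, 'tolerance_x': 1e-8, 'tolerance_y': 1e-8, 'bpp': 8}
X, report = ConjGradAlgorithmVarAlpha(np.zeros((2, 1)), quad, quad_grad, quad_hessian, options)
print(X)  # approaches the unconstrained minimizer inv(Q) @ b = [1/11, 7/11]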
Example #5
def NewtonAlgorithmLinesearch(X0, func, func_grad, func_hessian, options):

    epsilon = 1e-5  # gradient-norm stopping threshold
    reg_coeff = 1e-5  # Levenberg-Marquardt regularization of the Hessian
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    progress_x = np.zeros((N_iter_max + 1, X0.size))
    progress_y = np.zeros((N_iter_max + 1, 1))
    progress_x[0] = X0.ravel()
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0

    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old)

        if np.linalg.norm(grad) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        # Newton direction with Levenberg-Marquardt modification
        d = np.linalg.solve(func_hessian(X_old) + reg_coeff * np.eye(X_old.size), -grad)
        alpha = SecantAlgorithmAlphaLinesearch(X_old, func_grad, d)  # step size
        X = X_old + alpha * d

        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp)

        progress_x[iter_no] = X.ravel()
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        X_old = X

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
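SecantAlgorithmAlphaLinesearch is not included in this listing. A sketch of one common form, a secant iteration on the directional derivative phi'(a) = grad(X + a*d)^T d, whose root is the exact line-search step (the parameter defaults are illustrative assumptions):

def SecantAlgorithmAlphaLinesearch(X, func_grad, d, n_iter_max=20, tol=1e-8):
    # Sketch (assumed behavior): secant root-finding on the directional
    # derivative along d, starting from the bracket [0, 1].
    def dphi(a):
        return float(func_grad(X + a * d).T @ d)
    alpha0, alpha1 = 0.0, 1.0
    f0, f1 = dphi(alpha0), dphi(alpha1)
    for _ in range(n_iter_max):
        if abs(f1 - f0) < tol:
            break  # nearly flat secant; avoid dividing by ~0
        alpha0, alpha1, f0 = alpha1, alpha1 - f1 * (alpha1 - alpha0) / (f1 - f0), f1
        f1 = dphi(alpha1)
    return alpha1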
Example #6
def GradAlgorithmLinesearchFFT(X0, func, func_grad, options):

    epsilon = 1e-5  # gradient-norm stopping threshold
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    progress_x = np.zeros((N_iter_max + 1, X0.shape[0], X0.shape[1]), dtype=np.complex128)
    progress_y = np.zeros((N_iter_max + 1, X0.shape[0], X0.shape[1]), dtype=np.complex128)
    progress_x[0] = X0
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0

    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old)

        if np.linalg.norm(grad) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        d = -grad  # steepest-descent direction
        alpha = SecantAlgorithmAlphaLinesearchFFT(X_old, func_grad, d)  # step size
        X = X_old + alpha * d

        # Projection onto the box constraints of X
        # (return value unused; CapIntensityFFT is assumed to operate in place)
        CapIntensityFFT(X, bpp)

        progress_x[iter_no] = X
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        X_old = X

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
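SecantAlgorithmAlphaLinesearchFFT is presumably the complex-valued counterpart of the line search sketched after Example #5, replacing the inner product with the real part of the conjugate inner product. A sketch under that assumption:

def SecantAlgorithmAlphaLinesearchFFT(X, func_grad, d, n_iter_max=20, tol=1e-8):
    # Sketch (assumed behavior): same secant iteration as the real-valued
    # version, with Re<grad, d> as the directional derivative for complex X.
    def dphi(a):
        return float(np.real(np.vdot(func_grad(X + a * d), d)))
    alpha0, alpha1 = 0.0, 1.0
    f0, f1 = dphi(alpha0), dphi(alpha1)
    for _ in range(n_iter_max):
        if abs(f1 - f0) < tol:
            break  # nearly flat secant; avoid dividing by ~0
        alpha0, alpha1, f0 = alpha1, alpha1 - f1 * (alpha1 - alpha0) / (f1 - f0), f1
        f1 = dphi(alpha1)
    return alpha1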
Example #7
def GradAlgorithmManualAlpha(X0, func, func_grad, func_hessian, options):
    # func_hessian is unused here; the parameter is kept for a uniform signature.

    epsilon = 1e-5  # gradient-norm stopping threshold
    N_iter_max = options['N_iter_max']
    tolerance_x = options['tolerance_x']
    tolerance_y = options['tolerance_y']
    bpp = options['bpp']
    alpha = options['alpha']  # fixed, user-supplied step size
    progress_x = np.zeros((N_iter_max + 1, X0.size))
    progress_y = np.zeros((N_iter_max + 1, 1))
    progress_x[0] = X0.ravel()
    progress_y[0] = func(X0)
    X = X0  # keeps the report well-defined if we stop in the first iteration
    X_old = X0

    for iter_no in range(1, N_iter_max + 1):
        grad = func_grad(X_old)

        if np.linalg.norm(grad) < epsilon:
            print('norm(grad) < epsilon in %d iterations, exit..' % iter_no)
            break

        d = -grad  # steepest-descent direction
        X = X_old + alpha * d

        # Projection onto the box constraints of X
        X = CapIntensity(X, bpp)

        progress_x[iter_no] = X.ravel()
        progress_y[iter_no] = func(X)

        if AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
            break

        X_old = X

    report = {'N_iter_max': N_iter_max, 'iter_no': iter_no, 'X0': X0, 'X': X,
              'progress_x': progress_x, 'progress_y': progress_y}
    return (X, report)
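Finally, sketches of the two helpers every example relies on, under the simplest reading of their call sites (both are assumptions, not the original implementations):

def AreWeDoneYet(iter_no, progress_x, progress_y, tolerance_x, tolerance_y):
    # Sketch (assumed behavior): stop once both the last step in X and the
    # last change in the objective fall below their tolerances.
    dx = np.linalg.norm(progress_x[iter_no] - progress_x[iter_no - 1])
    dy = np.max(np.abs(progress_y[iter_no] - progress_y[iter_no - 1]))
    return dx < tolerance_x and dy < tolerance_y

def CapIntensity(X, bpp):
    # Sketch (assumed behavior): project onto the box [0, 2**bpp - 1],
    # i.e. the valid range of bpp-bit intensities.
    return np.clip(X, 0.0, 2.0 ** bpp - 1.0)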