Example #1
def _helper_ss_sparse_compare_gradients(x_data, y_data, kernel, p_param_vector, p_step=1e-6):
    """
    
    """
    
    k1 = kernel.copy()
    ss_model = GPy.models.StateSpace( x_data, y_data, kernel=k1, noise_var=1.0, balance=False, kalman_filter_type = 'svd')
    ss_model.param_array[:] = p_param_vector
    ss_model.parameters_changed()
    print(ss_model)
    print(ss_model.checkgrad(verbose=True,step=p_step))
    
    k2 = kernel.copy()
    import GPy.models.ss_sparse_model as ss_sparse_model
    sparse_model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,k2, noise_var=1.0, balance=False, largest_cond_num=1e+12, regularization_type=2) 
    sparse_model.param_array[:] = p_param_vector
    sparse_model.parameters_changed()
    print(sparse_model)
    print(sparse_model.checkgrad(verbose=True,step=p_step))
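
# A minimal usage sketch for the helper above (an illustration, not from the
# original experiments): the toy data and the parameter ordering
# [kernel params..., noise_var] are assumptions; np and GPy are assumed
# imported at module level as elsewhere in this file.
import numpy as np
import GPy

x_demo = np.linspace(0, 10, 200)[:, None]
y_demo = np.sin(x_demo) + 0.1 * np.random.randn(200, 1)
k_demo = GPy.kern.sde_Matern32(1, variance=1.0, lengthscale=2.0)
p_demo = np.array([1.0, 2.0, 0.1])  # assumed order: [variance, lengthscale, noise_var]
_helper_ss_sparse_compare_gradients(x_demo, y_demo, k_demo, p_demo, p_step=1e-4)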
Example #2
def evaluate_optimization(p_model='ss'):
    """
    1) The order of the RBF kernel matters a lot. If it is larger, the model
       can be unstable. Balancing of the RBF model may also matter.
    
    2) The order of the kernels also matters: moving RBF from the first to the
       last position changes the result. This seemed strange and undesirable,
       but in fact the observation does not make much sense as a real effect:
       parameters from optimization are assigned in a fixed order, so changing
       the kernel order misassigns all parameter values. This was a mistake.
    
    3) Sparse model:
        '1_ex6_new_ss_arno_optimal_bfgs__qp_bound_True': works OK. min_reg=1e+11.
            No other modifications helped.
        
        '1_ex6_new_ss_arno_optimal_scg__qp_bound_True': works OK. Variances are
            very small. min_reg=1e+15. Also works with different orders of RBF
            and periodic.
            
        '1_ex6_new_ss_arno_start_bfgs__qp_bound_True': not quite OK. The mean is
            also distorted by regularization. This is the first time this effect
            is observed! min_reg=1e+11.
            If we change the order of the periodic kernel 6->5, the mean
            distortion gets much smaller. Other changes did not give better
            results. min_reg is also not increased.
            
        '1_ex6_new_ss_arno_start_scg__qp_bound_True': works OK. min_reg=1e+15.
            However, lower min_reg does not always work. E.g. min_reg=1e+13
            produces an error; min_reg=1e+11 works again and the result is OK;
            min_reg=1e+10 and min_reg=1e+9 do not work; min_reg=1e+8 works but
            the mean and variance are distorted, probably because regularization
            type 1 turns on.
            
            Interesting case! It is unclear how the regularization works then.
            Errors may appear in both Cholmod and NumPy. E.g. (min_reg=1e+13,
            RBF order=6, Periodic order=6, all balances=False) gives a Cholmod
            error, while (min_reg=1e+13, RBF order=5, Periodic order=5, all
            balances=False) gives a NumPy error when computing Lambdas.
    """
    #file_name = '1_ex6_new_ss_arno_start_bfgs__qp_bound_True'
    #file_name = '1_ex6_new_ss_arno_start_scg__qp_bound_True'
    
    #file_name = '1_ex6_new_sparse_arno_start_scg__qp_bound_True'
    file_name = '1_ex6_new_sparse_0_scg__qp_bound_True_reg1e14'
    
    file_path = '/home/alex/Programming/python/Sparse GP/Experiemnts/Results'

    results_dict = io.loadmat( os.path.join(file_path, file_name) )

    # Data ->
    (x_data, y_data) = load_data_mine(detrend = True,detrend_model = 'gp')
    # Data <-
    
    # predict data->
    years_to_predict = 8
    step = np.mean( np.diff(x_data[:,0]) )
    
    x_new = x_data[-1,0] + np.arange( step, years_to_predict,  step ); x_new.shape = (x_new.shape[0],1)
    x_new = np.vstack( (x_data, x_new)) # combine train and test data
    
    #ssm_mean, ssm_var = model.predict(x_new, include_likelihood=False, largest_cond_num=1e+15, regularization_type=2)
    # predict data<-
    #import pdb; pdb.set_trace()
    # Kernel ->
    var_1_per = 5.0
    ls_1_per = 1 / 2 # divided by 2 because the lengthscale parameter has a different meaning in this parameterization
    period = 1    
    kernel1 = GPy.kern.sde_StdPeriodic(1, variance=var_1_per, lengthscale=ls_1_per, period=period, balance=False, approx_order = 6)
    
    var_1_per = 1.0 # does not change
    ls_1_per = 140.0
    kernel2 = GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
    
    # Short term fluctuations kernel
    var_1_per = 0.5
    ls_1_per = 1.0
    kernel3 =  GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
    
    var_1_per = 1
    ls_1_per = 100
    kernel4 =  GPy.kern.sde_RBF(1, variance=var_1_per, lengthscale=ls_1_per, balance=False, approx_order = 6)     
    
    noise_var = 1.0
    
    kernel = kernel1 * kernel2 + kernel3# + kernel4
    # Kernel <-
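    # Note 2) in the docstring above: param_array ordering follows the kernel
    # composition order, so inspect the parameter names before assigning a raw
    # vector (paramz/GPy kernels provide parameter_names() for this):
    # print(kernel.parameter_names())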
    
    if p_model == 'ss':            
        model1 = GPy.models.StateSpace( x_data, y_data, kernel=kernel, noise_var=noise_var, balance=False, kalman_filter_type = 'svd')
        model2 = GPy.models.StateSpace( x_data, y_data, kernel=kernel.copy(), noise_var=noise_var, balance=False, kalman_filter_type = 'svd')
    elif p_model == 'sparse':
        # Assumes regularization type 2
        import GPy.models.ss_sparse_model as ss_sparse_model
        model1 = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=noise_var, balance=False, largest_cond_num=1e+14, regularization_type=2) # Arno optimal parameters
        model2 = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel.copy(), noise_var=noise_var, balance=False, largest_cond_num=1e+14, regularization_type=2) # Parameters from optimization 
    else:
        raise ValueError("Unknown p_model '%s'" % p_model)
    # Model <-
        
    # Arno optimal parameters ->
    model_params = io.loadmat('/home/alex/Programming/python/Sparse GP/solin-2014-supplement/arno_opt_params_to_python.mat')
    tt = np.array( [ model_params['per_magnSigma2'],  model_params['per_period'], model_params['per_lengthscale']/2,
                                    1.0, model_params['quasi_per_mat32_lengthscale'],  
                    model_params['mat32_inacc_magnSigma2'], model_params['mat32_inacc_lengthScale']*10, 
            #model_params['rbf_magnSigma2'], model_params['rbf_lengthscale'], 
            model_params['opt_noise'] ] )
    #import pdb; pdb.set_trace()
    #print(tt)
    model1.param_array[:] = tt
    model1.parameters_changed()
    print(model1)
    #print(model1.objective_function())    
    mean1, var1 = model1.predict(x_new, include_likelihood=False) #, largest_cond_num=1e+15, regularization_type=2)    
    #_helper_ss_sparse_compare_gradients(x_data, y_data, kernel, tt, p_step=1e-4)    
    # Arno optimal parameters <-
    
    # Optimum (parameters from optimization) ->
    params = results_dict['params'][0] 
    #model = GPy.models.StateSpace( x_data, y_data, kernel=kernel, noise_var=1.0, balance=False, kalman_filter_type = 'svd')
    model2.randomize()
    #import pdb; pdb.set_trace()
    params[6] = params[6]*10
    #params = np.hstack((params[0:5],params[7]))
    model2.param_array[:] = params
    model2.parameters_changed()
    print(model2)
    mean2, var2 = model2.predict(x_new, include_likelihood=False) #, largest_cond_num=1e+15, regularization_type=2)
    
    #_helper_ss_sparse_compare_gradients(x_data, y_data, kernel, params, p_step=1e-4)
    
    # Optimum (parameters from optimization) <-

    # Plot ->
    plt.figure(2)
    #plt.title('Electricity Consumption Data', fontsize=30)
    plt.subplot(1,2,1)    
    plt.plot( x_data, y_data, 'g-', label='Data',linewidth=1, markersize=5)
    plt.plot( x_new, mean1, 'b-', label='Mean',linewidth=1, markersize=5)
    plt.plot( x_new, mean1+np.sqrt(var1), 'r--', label='Mean+std',linewidth=1, markersize=5)
    plt.plot( x_new, mean1-np.sqrt(var1), 'r--', label='Mean-std',linewidth=1, markersize=5)
    plt.title('Arno Optimal parameters')    
    
    plt.subplot(1,2,2)    
    plt.plot( x_data, y_data, 'g-', label='Data',linewidth=1, markersize=5)
    plt.plot( x_new, mean2, 'b-', label='Mean',linewidth=1, markersize=5)
    plt.plot( x_new, mean2+np.sqrt(var2), 'r--', label='Mean+std',linewidth=1, markersize=5)
    plt.plot( x_new, mean2-np.sqrt(var2), 'r--', label='Mean-std',linewidth=1, markersize=5)
    plt.title('Optimization parameters') 
    #plt.xlabel('Time (Hours)', fontsize=25)
    #plt.ylabel('Normalized Value', fontsize=25)
    #plt.legend(loc=2)
    
    plt.show()
Example #3
def experiment2_optimize_hyper_parameters(hyperparameters=1, p_model='sparse', optim = 'scg', 
                                          bound_qp_var=False, detrend_model = 'gp', detrend_file=None, save_file_name_prefix=None):
    """
    This function optimizes the hyperparameters of the model by gradient optimization.
    Note, that the trend is fitted separately and removed from the data
    before running optimization.
    
    Starting parameters are assigned reasonable values.
    
    Input:
    ------------------------
    hyperparameters: None or int
        For a plain optimization, None must be passed. A number may be used to
        load hyperparameters from a file and start the optimization from them.
        Not used currently.
        
    p_model: string
        Defines the model, one of: 'gp', 'ss', 'sparse'
    
    optim: string
        Which optimization method is used, one of: 'scg', 'bfgs'.
        
    bound_qp_var: bool
        Whether to bound the quasi-periodic lengthscale. If it is not bounded,
        some kernels may perform badly. Usually set to True.
    
    detrend_model: 'gp' or 'ss'
        Which model is used for detrending.
        
    detrend_file: string
        Which parameters to take for detrending. Note that a GP model is used
        for detrending because for the state-space and sparse models the
        sampling is too dense relative to the trend lengthscale, so they may
        perform badly.
    
    save_file_name_prefix: string
        Prefix of the resulting file name. The full file name (with some other
        info appended) is printed at the end.
        
    Output:
    --------------------------------
    Optimal hyperparameters are saved to a file.
    
    """ 
    #import pdb; pdb.set_trace()
    np.random.seed(237)
    
    import GPy.models.ss_sparse_model as ss_sparse_model
    
    # Data ->
    (x_data, y_data) = load_data(detrend = True, detrend_model = detrend_model, detrend_file=detrend_file)
    # Data <-
    #import pdb; pdb.set_trace()
    
    # Arno start hyper parameters ->
    if hyperparameters is not None:    
        model_params = io.loadmat('/home/alex/Programming/python/Sparse GP/solin-2014-supplement/arno_opt_params_to_python.mat')
        
#        tt = np.array( [ model_params['per_magnSigma2'],  model_params['per_period'], 
#                        model_params['per_lengthscale']/2,
#                                    1.0, model_params['quasi_per_mat32_lengthscale'],  
#                                    model_params['mat32_inacc_magnSigma2'],
#                                     model_params['mat32_inacc_lengthScale'], 
#            model_params['rbf_magnSigma2'], model_params['rbf_lengthscale'], 
#                                model_params['opt_noise'] ] )
    
    
    # Periodic kernel
    var_1_per = 0.5 #1.0 # 5.0 
    ls_1_per = 1 / 2 # divided by 2 because the lengthscale parameter has a different meaning in this parameterization
    period = 1    
    kernel1 = GPy.kern.sde_StdPeriodic(1, variance=var_1_per, lengthscale=ls_1_per, period=period, balance=False, approx_order = 6)
    if hyperparameters is not None:
        kernel1.param_array[:] = np.squeeze( np.array( [ model_params['per_magnSigma2'],  model_params['per_period'], model_params['per_lengthscale']/2 ] ) )
                                          # division by 2 applies only to the Arno start values; otherwise use the parameter as-is.
    kernel1.period.fix()
    if bound_qp_var:
        kernel1.lengthscale.constrain_bounded(0.4, 20000)

    # Quasiperiodic part
    var_1_per = 1.0 # does not change, fixed!
    ls_1_per = 50.0 #140.0
    kernel2 = GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
    if hyperparameters is not None:
        kernel2.param_array[:] = np.array( [ 1.0, model_params['quasi_per_mat32_lengthscale'] ] )
    kernel2.variance.fix()
    
    # Short term fluctuations kernel
    var_1_per = 0.1 #0.5
    ls_1_per = 50
    kernel3 =  GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
    if hyperparameters is not None:
        kernel3.param_array[:] = np.array( [ model_params['mat32_inacc_magnSigma2'],
                                     model_params['mat32_inacc_lengthScale'] ] )
    
    # RBF kernel. Not used!
    var_1_per = 1
    ls_1_per = 100
    kernel4 =  GPy.kern.sde_RBF(1, variance=var_1_per, lengthscale=ls_1_per, balance=False, approx_order = 6)    
    if hyperparameters is not None:
        kernel4.param_array[:] = np.array( [ model_params['rbf_magnSigma2'], 
                                          model_params['rbf_lengthscale'] ] )
    noise_var = 0.1
    if hyperparameters is not None:
        noise_var = float(model_params['opt_noise'])
    
    kernel = kernel1 * kernel2 + kernel3# + kernel4
    # Arno start hyper parameters <-
    
    # Model ->
    if p_model == 'ss':            
        model = GPy.models.StateSpace( x_data, y_data, kernel=kernel, noise_var=noise_var, balance=False, kalman_filter_type = 'svd')
    elif p_model == 'sparse':
        # Assumes regularization type 2
        model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=noise_var, balance=False, 
                                                  largest_cond_num=1e+14, regularization_type=2)
    else:
        raise ValueError("Unknown p_model '%s'" % p_model)
    # Model <-
    
    # Model ->
    #import pdb; pdb.set_trace()
#    model.kern.mul.Mat32.variance.fix()
#    model.kern.mul.std_periodic.period.fix()
    
    print(model)
    # Model <-
    
    class observer(object):
        def __init__(self):
            """
            
            """
            
            self.opt_obj_grads = None
            self.opt_gradients = None
            self.opt_params = None
            self.opt_obj_funcs = None
            
        def notif_callable(self, me, which=None):
            """
            Description
            """
    
            #import pdb; pdb.set_trace()

            # me.obj_grads            
            if isinstance(self.opt_obj_grads , np.ndarray): # previous array          
                self.opt_obj_grads = np.vstack( (self.opt_obj_grads, me.obj_grads) )                
            else:
                rr = me.obj_grads
                if isinstance(self.opt_obj_grads , list):
                    if isinstance(rr, np.ndarray):
                        tt = np.empty( (len(self.opt_obj_grads), rr.shape[0]) ) * np.nan
                        self.opt_obj_grads = np.vstack( (tt, rr) )
                    else:     
                        self.opt_obj_grads.append( (rr if rr is not None else np.nan) )
                else:
                    if isinstance(rr , np.ndarray):
                        self.opt_obj_grads = rr
                    else:
                        self.opt_obj_grads = [  (rr if (rr is not None) else np.nan), ]
    
            
            # me.gradient            
            if isinstance(self.opt_gradients , np.ndarray): # previous array          
                self.opt_gradients = np.vstack( (self.opt_gradients, me.gradient) )
            else:
                rr = me.gradient # same as me.objective_function_gradients()              
                if isinstance(self.opt_gradients , list):
                    if isinstance(rr, np.ndarray):
                        tt = np.empty( (len(self.opt_gradients), rr.shape[0]) ) * np.nan
                        self.opt_gradients = np.vstack( (tt, rr) )
                    else:     
                        self.opt_gradients.append( (rr if rr is not None else np.nan) )
                else:
                    if isinstance(rr , np.ndarray):
                        self.opt_gradients = rr
                    else:
                        self.opt_gradients = [  (rr if (rr is not None) else np.nan), ]
            
            # me.param_array            
            if isinstance(self.opt_params , np.ndarray): # previous array          
                self.opt_params = np.vstack( (self.opt_params, me.param_array) )
            else:
                rr = me.param_array # current parameter vector
                if isinstance(self.opt_params , list):
                    if isinstance(rr, np.ndarray):
                        tt = np.empty( (len(self.opt_params), rr.shape[0]) ) * np.nan
                        self.opt_params = np.vstack( (tt, rr) )
                    else:     
                        self.opt_params.append( (rr if rr is not None else np.nan) )
                else:
                    if isinstance(rr , np.ndarray):
                        self.opt_params = rr
                    else:
                        self.opt_params = [  (rr if (rr is not None) else np.nan), ]
        
            if self.opt_obj_funcs is None: # first iteration
                self.opt_obj_funcs = [me.objective_function(),] if (me.objective_function() is not None) else [np.nan,]
            else:
                self.opt_obj_funcs.append( me.objective_function() )
        
        def save(self, optim_save_path, optim_file_name, save= False):
            """
            Saves optim data to file
            """
            
            result_dict = {}
            result_dict['opt_obj_grads'] = self.opt_obj_grads
            result_dict['opt_gradients'] = self.opt_gradients
            result_dict['opt_params'] = self.opt_params
            result_dict['opt_obj_funcs'] = np.array(self.opt_obj_funcs)
            
            optim_file_name = optim_file_name + '__optim_history'
            if save:
                io.savemat(os.path.join(optim_save_path, optim_file_name), result_dict)
            
            return result_dict
            
            
    oo = observer()
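    # Registering the observer makes GPy/paramz call notif_callable on every
    # parameter update, so the whole optimization trajectory gets recorded.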
    model.add_observer( oo, oo.notif_callable )
    
    if optim == 'bfgs':
        # L-BFGS-B optimization ->
        # 'factr' - stopping criterion: stop when
        # (f_k - f_{k+1}) / max(|f_k|, |f_{k+1}|, 1) <= factr * eps,
        # where eps is the machine precision. default: 1e7
        #
        # pgtol : float, optional
        #    The iteration will stop when
        #    ``max{|proj g_i | i = 1, ..., n} <= pgtol``
        #    where ``pg_i`` is the i-th component of the projected gradient.
        # default: 1e-5
        model.optimize(optimizer ='lbfgsb', bfgs_factor = 1e12, gtol=1e-3, messages=True)
        # L-BFGS-B optimization <-
    elif  optim == 'scg':
        # SCG optimization ->
        # xtol: controls reduction in step size
        #     default: 1e-6
        # ftol: controls reduction in objective                   
        #     default: 1e-6
        # gtol: controls reduction in gradients
        #     default: 1e-5
        
        
        model.optimize(optimizer ='scg', xtol = 1e-3, ftol=1e-3,  gtol = 1e-3, messages=True)
        # SCG optimization <-
    else:
        raise ValueError("Wrong Optimizer name")
        #model.optimize(optimizer ='scg', messages=True)
    print(model)
    
    result_dict = {}
    result_dict['params'] = model.param_array[:]
    result_dict.update( oo.save('', '', save= False) )
    hyp = str(hyperparameters) if hyperparameters is not None else '2'
    save_file_name = save_file_name_prefix + '_' +p_model+ '_' + hyp + '_' + optim + '_' + '_qp_bound_' +str(bound_qp_var)
    print(results_files_prefix)
    print(save_file_name)    
    io.savemat(os.path.join(results_files_prefix, save_file_name), result_dict)
    
    return model, oo
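
# A hedged invocation sketch for the function above. With hyperparameters=None
# and these arguments the saved file is named
# '1_ex6_new_sparse_2_scg__qp_bound_True' (cf. the commented file names in
# Example #4); the detrend file name is the default used elsewhere in this file.
model, history = experiment2_optimize_hyper_parameters(
    hyperparameters=None, p_model='sparse', optim='scg', bound_qp_var=True,
    detrend_model='gp', detrend_file='ex6_trend_fit_sparse_2.mat',
    save_file_name_prefix='1_ex6_new')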
Example #4
def experiment2_fit_sparse_gp(hyperparameters='sparse_optimal',detrend_file_name=None, other_params_file_name=None):
    """
    The model is Sparse GP.
    Fit the model to the data and plot the result.
    
    Input:
    -------------------------------------------
    hyperparameters: string
        Which parameters (hyper-parameters) to use for fitting.
        Two options: 'sparse_optimal' and 'arno_optimal'. The first one is used
        here; the corresponding parameter file names are given in the code below.
        
        The second one is the older option, kept for comparison.
        
    detrend_file_name: string
        File name of the saved trend parameters.
    
    other_params_file_name: string
        File name of the remaining (non-trend) kernel and noise parameters.
        
    Output:
    --------------------------------------------
        Fitting plot
    """
    import time
    
    import GPy.models.ss_sparse_model as ss_sparse_model
    
    # Data ->
    (x_data, y_data) = load_data(detrend = False)
    # Data <-
    
    if hyperparameters == 'sparse_optimal':
        
        #detrend_file_name = 'ex6_trend_fit_sparse_2.mat'
        #other_params_file_name = '1_ex6_new_sparse_0_scg__qp_bound_True_reg1e14'
        #other_params_file_name = '1_ex6_new_ss_2_scg__qp_bound_True'
        #other_params_file_name = '1_ex6_new_sparse_2_scg__qp_bound_True'
        
        trend_params = io.loadmat( os.path.join(results_files_prefix, detrend_file_name) ); 
        trend_params = trend_params['params'][0]
        other_params = io.loadmat( os.path.join(results_files_prefix, other_params_file_name) )
        other_params = other_params['params'][0]
    
        var_1_per = 1
        ls_1_per = 1 / 2 # divided by 2 because the lengthscale parameter has a different meaning in this parameterization
        period = 1    
        kernel1 = GPy.kern.sde_StdPeriodic(1, variance=var_1_per, lengthscale=ls_1_per, period=period, balance=False, approx_order = 6)
        
        var_1_quasi = 1 # does not change
        ls_1_quasi = 1
        kernel2 = GPy.kern.sde_Matern32(1, variance=var_1_quasi, lengthscale=ls_1_quasi)
        
        # Short term fluctuations kernel
        var_1_short = 1
        ls_1_short = 1
        kernel3 =  GPy.kern.sde_Matern32(1, variance=var_1_short, lengthscale=ls_1_short)
        
        # RBF kernel. Not used if the parameter without_rbf = True 
        var_1_trend = trend_params[0]
        ls_1_trend = trend_params[1]
        kernel4 =  GPy.kern.sde_RBF(1, variance=var_1_trend, lengthscale=ls_1_trend, balance=False, 
                                    approx_order = 6)    
        
        
        kernel = kernel1 * kernel2 + kernel3        
        kernel.param_array[:] = other_params[:-1]
    
        kernel = kernel + kernel4
        
        kernel.fix() # fix all the parameters except the noise. Relevant when optimizing noise below. 
        noise_var = other_params[-1]
        model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=noise_var, balance=False, largest_cond_num=1e+14, regularization_type=2)
        #model.optimize(messages=True)
        # The value below was obtained by uncommenting the line above and running the optimization.
        model.Gaussian_noise = 0.000358573407244 # recompute if the data or kernel changes
        
        print('Run sparse model')
        
    elif hyperparameters == 'arno_optimal':
        model_params = io.loadmat('/home/alex/Programming/python/Sparse GP/solin-2014-supplement/arno_opt_params_to_python.mat')
        
        var_1_per = 1
        ls_1_per = 1 / 2 # divided by 2 because the lengthscale parameter has a different meaning in this parameterization
        period = 1    
        kernel1 = GPy.kern.sde_StdPeriodic(1, variance=var_1_per, lengthscale=ls_1_per, period=period, balance=False, approx_order = 6)
        
        var_1_per = 1 # does not change
        ls_1_per = 1
        kernel2 = GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
        
        # Short term fluctuations kernel
        var_1_per = 1
        ls_1_per = 1
        kernel3 =  GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
        
        # RBF kernel. Not used if the parameter without_rbf = True
        var_1_per = 1
        ls_1_per = 1
        kernel4 =  GPy.kern.sde_RBF(1, variance=var_1_per, lengthscale=ls_1_per, balance=False, approx_order = 6)    
        
        
        kernel = kernel1 * kernel2 + kernel3 + kernel4
        #kernel = kernel1 * kernel2 + kernel4
        
        model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=1.0, balance=False, largest_cond_num=1e+17, regularization_type=2)
        #import pdb; pdb.set_trace()
        model.param_array[:] = np.array( [ model_params['per_magnSigma2'],  model_params['per_period'], model_params['per_lengthscale']/2,
                                    1.0, model_params['quasi_per_mat32_lengthscale'],  
                    model_params['mat32_inacc_magnSigma2'], model_params['mat32_inacc_lengthScale']*50, 
                          model_params['rbf_magnSigma2'], model_params['rbf_lengthscale'], 
                                    model_params['opt_noise'] ] )

    print(model)
    # predict ->
    years_to_predict = 8
    step = np.mean( np.diff(x_data[:,0]) )
    
    x_new = x_data[-1,0] + np.arange( step, years_to_predict,  step ); x_new.shape = (x_new.shape[0],1)
    x_new = np.vstack( (x_data, x_new)) # combine train and test data
    #import pdb; pdb.set_trace()
    t1 = time.time()
    sparse_mean, sparse_var = model.predict(x_new, include_likelihood=True, balance=False,
                                      largest_cond_num=1e+14, regularization_type=2)
    print('Sparse GP prediction time {0:e} sec.'.format(time.time() - t1))
    # predict <-
    
#     # Plot ->
#    sparse_mean,sparse_std,x_data_denorm,y_data_denorm = denormalize_data(sparse_mean,np.sqrt(sparse_var))   
#    x_new = x_new + 1974 # Put the right year
#    
#    plt.figure(2)
#    #plt.title('Electricity Consumption Data', fontsize=30)    
#    plt.plot( x_data_denorm, y_data_denorm, 'g-', label='Data',linewidth=1, markersize=5)
#    plt.plot( x_new, sparse_mean, 'b-', label='Mean',linewidth=1, markersize=5)
#    plt.plot( x_new, sparse_mean+2*sparse_std, 'r--', label='2*Std',linewidth=1, markersize=5)
#    plt.plot( x_new, sparse_mean-2*sparse_std, 'r--', label='2*Std',linewidth=1, markersize=5)
#    plt.xlim((2010,2026))
#    plt.ylim((370,450))
#    #plt.xlabel('Time (Hours)', fontsize=25)
#    #plt.ylabel('Normalized Value', fontsize=25)
#    #plt.legend(loc=2)
#    plt.show()    
#    # Plot <- 

    # Plot ->
    sparse_mean,sparse_std,x_data_denorm,y_data_denorm = denormalize_data(sparse_mean,np.sqrt(sparse_var))   
    x_new = x_new + 1974 # Put the right year
    
    plt.figure(2)
    (y_min,y_max) = (370,450)    
    (x_min,x_max) = (2010,2026)
    
    title_font = {'family': 'serif', 'color':  'k','weight': 'normal', 'size': 22}
    plt.title(r'Spin-GP prediction', fontdict=title_font)
    
    plt.plot( x_data_denorm, y_data_denorm, label='Data', marker= '*', color='0', markersize=5, linewidth=0)
    plt.plot( x_new, sparse_mean, color=(0,0,1), linestyle='-', label=r'$m$  (prediction mean)',linewidth=1, markersize=5)
    plt.plot( x_new, sparse_mean+2*sparse_std, color=(0,0,0.5), linestyle='--', label=r'$m\pm2\sigma$',linewidth=1)
    plt.plot( x_new, sparse_mean-2*sparse_std, color=(0,0,0.5), linestyle='--', label=None,linewidth=1)
    
    plt.plot( (x_data_denorm[-1], x_data_denorm[-1]),(380,440),  color=(0.2,0.2,0.2), linestyle='--', linewidth=1,
        label='Data/Prediction delimiter')
    plt.xlim( (x_min,x_max) )
    plt.ylim( (y_min,y_max) )
    
    labels_font = {'family': 'serif', 'color':  'k','weight': 'normal', 'size': 20}
    plt.xlabel(r'Time (year)', fontdict=labels_font)
    plt.ylabel(r'$\mathrm{CO_2}$ concentration (ppm)', fontdict=labels_font)
    plt.tight_layout() # for adjusting the bottom of x label
    # legend ->
    plt.legend(loc=4)
    # legend <-
    
    # Grid ->    
    plt.grid(False)
    # Grid <-  
    
    # Ticks ->
    ax = plt.gca()
    from matplotlib.ticker import MultipleLocator, AutoMinorLocator
    x_major = MultipleLocator(4); x_minor = MultipleLocator(2)
    ax.xaxis.set_major_locator(x_major); ax.xaxis.set_minor_locator(x_minor)
    y_major = MultipleLocator(20); y_minor = MultipleLocator(10)    
    ax.yaxis.set_major_locator(y_major); ax.yaxis.set_minor_locator(y_minor)
    #ax.xaxis.set_minor_locator(AutoMinorLocator(2))    
    
    plt.tick_params(which='both', width=1)
    plt.tick_params(which='major', length=7)
    plt.tick_params(which='minor', length=4)

    plt.yticks( range(370,470,20), [str(ss) for ss in range(370,470,20)], fontsize=20)
    plt.xticks([2010,2014,2018,2022,2026], ['2010','2014','2018','2022','2026'],fontsize=20)
    # Ticks <-
    plt.show()    
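
# A hedged invocation sketch, reusing the file names left in the comments near
# the top of the function:
experiment2_fit_sparse_gp(
    hyperparameters='sparse_optimal',
    detrend_file_name='ex6_trend_fit_sparse_2.mat',
    other_params_file_name='1_ex6_new_sparse_0_scg__qp_bound_True_reg1e14')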
Example #5
def load_data(detrend = False, detrend_model = 'gp', detrend_file='ex6_trend_fit_sparse_2.mat',plot=False):
    """
    The function loads the raw data, normalizes it (zero mean, unit variance),
    and optionally detrends it.
    
    Input:
    -------------------------
    detrend: bool
        Whether to detrend or not.
    
    detrend_model: string
        Which model is used for detrending. The sparse and state-space models
        may not be good for detrending because the data dt is too small
        relative to the trend lengthscale.
    
    detrend_file: string
        File name of the trend parameters. The parameters are found in the
        function experiment2_fit_trend.
    
    plot: bool
        Whether to plot the trend.
    """
    
    #import pdb; pdb.set_trace()
    x_data, y_data = load_data_low_level(data_file_path)
    
    y_data = (y_data - np.mean(y_data)) / np.std(y_data)
    x_data = x_data - 1974 # Start time from 0.
    
    #import pdb; pdb.set_trace()
    if detrend:
        np.random.seed(234) # seed the random number generator
        
        results_dict = io.loadmat( os.path.join(results_files_prefix, detrend_file) )
        
        params = results_dict['params'][0]
        variance_init = float(params[0])
        lengthcale_init = float(params[1])
        noise_var_init = float(params[2])
        
        if detrend_model == 'gp':
            kernel = GPy.kern.RBF(1,variance=variance_init, lengthscale = lengthcale_init)
            kernel.variance.fix()
            kernel.lengthscale.fix()
        
            model = GPy.models.GPRegression(x_data, y_data, kernel, noise_var=noise_var_init)
            model.optimize()
        elif detrend_model == 'ss':
       
            kernel = GPy.kern.sde_RBF(1,variance=variance_init, lengthscale = lengthcale_init,
                                      balance= False, approx_order = 6 )
            kernel.variance.fix()
            kernel.lengthscale.fix()
            
            model = GPy.models.StateSpace(x_data, y_data, kernel, noise_var=noise_var_init, balance=False, kalman_filter_type = 'svd')
            
        elif detrend_model == 'sparse':
            import GPy.models.ss_sparse_model as ss_sparse_model
            kernel = GPy.kern.sde_RBF(1,variance=variance_init, lengthscale = lengthcale_init,
                                      balance= False, approx_order = 6 )
            kernel.variance.fix()
            kernel.lengthscale.fix()
            
            model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=noise_var_init, 
                                                      balance=False, 
                                                      largest_cond_num=1e+20, regularization_type=2)
        else:
            raise ValueError("Unknown detrend_model '%s'" % detrend_model)
    #import pdb; pdb.set_trace()
        print('Detrend:')                                              
        print(model)
        (y_pred,var_pred) = model.predict(x_data)
        
        if plot:
            plt.figure(1)
             
            plt.plot( x_data, y_data, 'b.-', label='Data',linewidth=1, markersize=5)
            plt.plot( x_data, y_pred, 'r-', label='Trend mean',linewidth=1, markersize=5)
            plt.plot( x_data, y_pred+np.sqrt(var_pred), 'r--', label='Trend+std',linewidth=1, markersize=5)
            plt.plot( x_data, y_pred-np.sqrt(var_pred), 'r--', label='Trend-std',linewidth=1, markersize=5)
            
            plt.show()   
        
        y_data = y_data - y_pred
        
    return x_data, y_data
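
# A hedged usage sketch (assumes the module-level data_file_path and
# results_files_prefix point at existing files):
x, y = load_data(detrend=True, detrend_model='gp',
                 detrend_file='ex6_trend_fit_sparse_2.mat', plot=True)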
Example #6
def experiment2_fit_trend(trend_model='gp', optimize=False, optimize_file_prefix = '1', load_params=True, load_params_file_name=None):
    """
    This function fits the trend to the original raw data.
    
    This function must be run in order to find the parameters (hyperparameters) 
    of the trend.    
        
    Input:
    ------------------
    
    trend_model: string
        Which model is used for fitting
    
    optimize: bool
        Whether to perform trend parameters optimization.
    
    optimize_file_prefix: string
        When the parameters are optimized, this is the prefix of the file with
        the optimal parameters. Note that 'trend_model' is appended to the
        file name in any case.
    
    load_params: bool
        Whether the trend parameters are loaded from a file.
    
    load_params_file_name: string
        If the parameters are loaded from a file, this gives the file name.
    Output:
    ----------------------------
    
    Either a plot of the trend (if no optimization happens), or the optimized
    parameters saved to a file; see the code for the file name.
    
    """
    
    (x_data,y_data) = load_data(detrend = False) # loads the raw data

    np.random.seed(234) # seed the random number generator just in case there are some initializations.
    
    if not load_params:
        variance_init = 40
        lengthcale_init = 400
        noise_var_init = 0.1
    else:
        #import pdb; pdb; pdb.set_trace()
        
        results_dict = io.loadmat( os.path.join(results_files_prefix, load_params_file_name) )
        
        params = results_dict['params'][0]
        variance_init = float(params[0])
        lengthcale_init = float(params[1])
        noise_var_init = float(params[2])
    
    if trend_model == 'gp':
        kernel = GPy.kern.RBF(1,variance=variance_init, lengthscale = lengthcale_init)
        
        model = GPy.models.GPRegression(x_data, y_data, kernel, noise_var=noise_var_init)
        
    elif trend_model == 'ss':
        if optimize:
            x_data = x_data[::10]; y_data = y_data[::10]
        kernel = GPy.kern.sde_RBF(1,variance=variance_init, lengthscale = lengthcale_init,
                                  balance= False, approx_order = 6 )
        kernel.lengthscale.constrain_bounded(100,500)
        model = GPy.models.StateSpace(x_data, y_data, kernel, noise_var=noise_var_init, balance=False, kalman_filter_type = 'svd')
        
    elif trend_model == 'sparse':
        if optimize:
            x_data = x_data[::40]; y_data = y_data[::40]
            
        import GPy.models.ss_sparse_model as ss_sparse_model
        kernel = GPy.kern.sde_RBF(1,variance=variance_init, lengthscale = lengthcale_init,
                                  balance= False, approx_order = 6 )
        kernel.lengthscale.constrain_bounded(100,500)
        
        model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=noise_var_init, 
                                                  balance=False, 
                                                  largest_cond_num=1e+20, regularization_type=2)
    #import pdb; pdb.set_trace()
    if optimize:
        model.optimize(messages=True)
        print(model)
        #import pdb; pdb.set_trace()
        
        result_dict = {}
        result_dict['params'] = model.param_array[:]
        save_file_name = optimize_file_prefix + '_' + trend_model
        print(save_file_name)
        io.savemat(os.path.join(results_files_prefix, save_file_name), result_dict)
    else:
        model.parameters_changed()
        
    print(model)
    if trend_model == 'gp':
        (y_pred,var_pred) = model.predict(x_data)
    elif trend_model == 'ss':
        (y_pred,var_pred) = model.predict(x_data, include_likelihood=True)
    elif trend_model == 'sparse':
        (y_pred,var_pred) = model.predict(x_data, include_likelihood=True, balance=False,
                                      largest_cond_num=1e+14, regularization_type=2)
        #(y_pred,var_pred) = model.predict(x_data)
    
    plt.figure(1)
     
    plt.plot( x_data, y_data, 'b.-', label='Data',linewidth=1, markersize=5)
    plt.plot( x_data, y_pred, 'r-', label='Trend mean',linewidth=1, markersize=5)
    plt.plot( x_data, y_pred+np.sqrt(var_pred), 'r--', label='Trend+std',linewidth=1, markersize=5)
    plt.plot( x_data, y_pred-np.sqrt(var_pred), 'r--', label='Trend-std',linewidth=1, markersize=5)
    
    plt.show() 
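
# A hedged two-step usage sketch: first optimize and save the trend parameters,
# then reload them to plot the fitted trend. The file name '1_gp' follows from
# optimize_file_prefix + '_' + trend_model in the code above.
experiment2_fit_trend(trend_model='gp', optimize=True,
                      optimize_file_prefix='1', load_params=False)
experiment2_fit_trend(trend_model='gp', optimize=False,
                      load_params=True, load_params_file_name='1_gp')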
Example #7
def model_time_measurement(n_points,
                           kernel_num=0,
                           repetitions_num=3,
                           run_sparse=True,
                           run_ss=True,
                           run_gp=True):
    """
    This function builds a model (in the GPy sense) and measures the time this takes.
    
    Input:
    ------------------------------
    
    n_points: int
        Number of data points in the data
    kernel_num: int
        Which kernel to use. For the state-space and sparse models the kernel
        defines the block size in the inverse kernel BTD matrix.
    repetitions_num: int
        How many times the time measurement is performed
    run_sparse: bool
        Whether to run the time measurement for the Spin-GP (sparse) model
    run_ss: bool
        Whether to run the time measurement for the state-space model
    run_gp: bool
        Whether to run the time measurement for the regular GP model
        
    Output:
    ----------------------
    Lists of per-repetition running times for the sparse, state-space, and
    regular GP models.
    """

    print(
        'Time measurement test: %i data points, run_sparse %i, run_ss %i , run_gp %i'
        % (n_points, run_sparse, run_ss, run_gp))
    x_data, y_data = generate_data(n_points,
                                   p_x_lower_value=0.0,
                                   p_x_upper_value=20000.0)

    kernel1, kernel2 = select_kernel(kernel_num)
    noise_var = 0.1

    sparse_run_time = []
    ss_run_time = []
    gp_run_time = []
    gc.collect()
    for rep_no in range(repetitions_num):
        if run_sparse:
            #print('Sparse GP run:')
            kern = kernel1.copy()

            t1 = time.time()
            sparse_gp = ss_sparse_model.SparcePrecisionGP(
                x_data,
                y_data,
                kernel=kern,
                noise_var=noise_var,
                balance=False,
                largest_cond_num=1e+10,
                regularization_type=2)
            sparse_run_time.append(time.time() - t1)

            sparse_gp_marginal_ll = (-sparse_gp.objective_function())
            sparse_d_marginal_ll = -sparse_gp.objective_function_gradients()
            sparse_d_marginal_ll.shape = (sparse_d_marginal_ll.shape[0], 1)

            del sparse_gp, kern
            gc.collect()
        if run_ss:
            #print('SS run:')
            kern = kernel1.copy()

            t1 = time.time()
            ss_model = GPy.models.StateSpace(x_data,
                                             y_data,
                                             kernel=kern,
                                             noise_var=noise_var,
                                             balance=False,
                                             kalman_filter_type='svd')

            ss_run_time.append(time.time() - t1)

            #ss_marginal_ll = (-ss_model.objective_function())
            #ss_d_marginal_ll = -ss_model.objective_function_gradients(); ss_d_marginal_ll.shape = (ss_d_marginal_ll.shape[0],1)

            del ss_model, kern
            gc.collect()
        if run_gp:
            #print('Regular GP run:')
            kern = kernel2.copy()

            t1 = time.time()
            gp_reg = GPy.models.GPRegression(x_data,
                                             y_data,
                                             kern,
                                             noise_var=noise_var)
            gp_run_time.append(time.time() - t1)

            #gp_marginal_ll = (-gp_reg.objective_function())
            #gp_d_marginal_ll = -gp_reg.objective_function_gradients(); gp_d_marginal_ll.shape = (gp_d_marginal_ll.shape[0],1)

            del gp_reg, kern
            gc.collect()
    gc.collect()
    return sparse_run_time, ss_run_time, gp_run_time
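
# A hedged usage sketch: average the per-repetition timings returned above
# (np is assumed imported at module level; kernel_num=0 is an assumed valid
# argument for select_kernel):
sparse_t, ss_t, gp_t = model_time_measurement(5000, kernel_num=0, repetitions_num=3)
print('mean build time (sec): sparse %.3f, ss %.3f, gp %.3f'
      % (np.mean(sparse_t), np.mean(ss_t), np.mean(gp_t)))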
Example #8
def experiment6_action1(hyperparameters='arno_start', p_model='ss', optim = 'bfgs', bound_qp_var=False):
    """
    Copies the first submision experiment6 action 1. Training of hyper parameters.
    It was working improperly
    earlier. What is now? - Working.
    """ 
    
    import GPy.models.ss_sparse_model as ss_sparse_model
    
    # Data ->
    data_file_path= '/home/agrigori/Programming/python/Sparse GP/CO2_data/co2_weekly_init_clean.csv'
    results_filex_prefix = '/home/agrigori/Programming/python/Sparse GP/Experiemnts/Results'
    
    data = np.loadtxt(data_file_path); 
    data = data[ np.where(data[:,1] > 0)[0] ,:] # get rid of missing values
    
    y_data = data[:,1]; y_data.shape = (y_data.shape[0],1)
    x_data = data[:,0]; x_data.shape = (x_data.shape[0],1)
    
    y_data = (y_data - np.mean(y_data)) / np.std(y_data)
    x_data = x_data - 1974
    # Data <-
    
    if hyperparameters == 'arno_start':
        # Starting hyperparameters used in the first submission
        var_1_trend = 1e4
        ls_1_trend = 100.0
        
        per_per = 1 # fixed
        per_var = 5.0
        per_ls = 1
        var_2_quasi = 1.0
        ls_2_quasi = 140
        
        var_3_quasi = 0.5
        ls_3_quasi = 1
        
        noise_var = 1.0
    
        kernel =  GPy.kern.sde_StdPeriodic(1, variance=per_var, period = per_per, lengthscale=per_ls, balance=False, approx_order = 6)*\
                  GPy.kern.sde_Matern32(1, variance=var_2_quasi, lengthscale=ls_2_quasi) +\
                  GPy.kern.sde_Matern32(1, variance=var_3_quasi, lengthscale=ls_3_quasi) +\
                  GPy.kern.sde_RBF(1, variance=var_1_trend, lengthscale=ls_1_trend, balance=False, approx_order = 6)
                  
        kernel.mul.Mat32.variance.fix()
        kernel.mul.std_periodic.period.fix()
        if bound_qp_var:
            kernel.mul.std_periodic.lengthscale.constrain_bounded(0.2, 20000)
#        result_dict = io.loadmat(os.path.join(results_filex_prefix,'ex6_params'))
#        params = result_dict['params'].squeeze()
#        
#        kernel[:] = params[0:-1]
#        noise_var = params[-1]
        if p_model == 'ss':            
            model = GPy.models.StateSpace( x_data, y_data, kernel=kernel, noise_var=noise_var, balance=False, kalman_filter_type = 'svd')
        elif p_model == 'sparse':
            # Assumes regularization type 2
            model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=1.0, balance=False, largest_cond_num=1e+11, regularization_type=2)
            # optim scg: Kernel: std_per approx order=6 bal=False, rbf_approx_order=6, bal=False. Model:  balance=False, largest_cond_num=1e+12, regularization_type=2.
            # optim bfgs: Kernel: std_per approx order=6 bal=False, rbf_approx_order=5, bal=False. Model:  balance=False, largest_cond_num=1e+11, regularization_type=2.
        else:
            raise ValueError("Unknown p_model '%s'" % p_model)
    elif hyperparameters == 'arno_optimal':
        model_params = io.loadmat('/home/agrigori/Programming/python/Sparse GP/solin-2014-supplement/arno_opt_params_to_python.mat')
        
        var_1_per = 1
        ls_1_per = 1 / 2 # divided by 2 because the lengthscale parameter has a different meaning in this parameterization
        period = 1    
        kernel1 = GPy.kern.sde_StdPeriodic(1, variance=var_1_per, lengthscale=ls_1_per, period=period, balance=False, approx_order = 6)
        
        var_1_per = 1 # does not change
        ls_1_per = 1
        kernel2 = GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
        
        # Short term fluctuations kernel
        var_1_per = 1
        ls_1_per = 1
        kernel3 =  GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
        
        # RBF kernel. Not used if the parameter without_rbf = True 
        var_1_per = 1
        ls_1_per = 1
        kernel4 =  GPy.kern.sde_RBF(1, variance=var_1_per, lengthscale=ls_1_per, balance=False, approx_order = 6)    
        
        
        kernel = kernel1 * kernel2 + kernel3 + kernel4
        
        if p_model == 'ss':
            model = GPy.models.StateSpace( x_data, y_data, kernel=kernel, noise_var=1.0, balance=False, kalman_filter_type = 'regular')
        elif p_model == 'sparse':
            # Assumes regularization type 2
            
            model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=1.0, balance=False, largest_cond_num=1e+15, regularization_type=2) 
            # Parameters of runs: 
            # optim bfgs: Kernel: std_per approx order=6 bal=False, rbf_approx_order=6, bal=False. Model: balance=False, largest_cond_num=1e+15, regularization_type=2
            # optim scg: Kernel: std_per approx order=6 bal=False, rbf_approx_order=6, bal=False. Model: balance=False, largest_cond_num=1e+13, regularization_type=2
        else:
            raise ValueError("Unknown p_model '%s'" % p_model)
        
        tt = np.array( [ model_params['per_magnSigma2'],  model_params['per_period'], model_params['per_lengthscale']/2,
                                    1.0, model_params['quasi_per_mat32_lengthscale'],  model_params['mat32_inacc_magnSigma2'],
                                     model_params['mat32_inacc_lengthScale'], 
            model_params['rbf_magnSigma2'], model_params['rbf_lengthscale'], model_params['opt_noise'] ] )
            
        #print(tt)
        model.param_array[:] = tt
        
        #import pdb; pdb.set_trace()
        model.kern.mul.Mat32.variance.fix()
        model.kern.mul.std_periodic.period.fix()
        if bound_qp_var:
            model.kern.mul.std_periodic.lengthscale.constrain_bounded(0.2, 20000)
        #model.kern.mul.std_periodic.
        print(model)
        
    class observer(object):
        def __init__(self):
            """
            
            """
            
            self.opt_obj_grads = None
            self.opt_gradients = None
            self.opt_params = None
            self.opt_obj_funcs = None
            
        def notif_callable(self, me, which=None):
            """
            Description
            """
    
            #import pdb; pdb.set_trace()

            # me.obj_grads            
            if isinstance(self.opt_obj_grads , np.ndarray): # previous array          
                self.opt_obj_grads = np.vstack( (self.opt_obj_grads, me.obj_grads) )                
            else:
                rr = me.obj_grads
                if isinstance(self.opt_obj_grads , list):
                    if isinstance(rr, np.ndarray):
                        tt = np.empty( (len(self.opt_obj_grads), rr.shape[0]) ) * np.nan
                        self.opt_obj_grads = np.vstack( (tt, rr) )
                    else:     
                        self.opt_obj_grads.append( (rr if rr is not None else np.nan) )
                else:
                    if isinstance(rr , np.ndarray):
                        self.opt_obj_grads = rr
                    else:
                        self.opt_obj_grads = [  (rr if (rr is not None) else np.nan), ]
    
            
            # me.gradient            
            if isinstance(self.opt_gradients , np.ndarray): # previous array          
                self.opt_gradients = np.vstack( (self.opt_gradients, me.gradient) )
            else:
                rr = me.gradient # same as me.objective_function_gradients()              
                if isinstance(self.opt_gradients , list):
                    if isinstance(rr, np.ndarray):
                        tt = np.empty( (len(self.opt_gradients), rr.shape[0]) ) * np.nan
                        self.opt_gradients = np.vstack( (tt, rr) )
                    else:     
                        self.opt_gradients.append( (rr if rr is not None else np.nan) )
                else:
                    if isinstance(rr , np.ndarray):
                        self.opt_gradients = rr
                    else:
                        self.opt_gradients = [  (rr if (rr is not None) else np.nan), ]
            
            # me.param_array            
            if isinstance(self.opt_params , np.ndarray): # previous array          
                self.opt_params = np.vstack( (self.opt_params, me.param_array) )
            else:
                rr = me.param_array # current parameter vector
                if isinstance(self.opt_params , list):
                    if isinstance(rr, np.ndarray):
                        tt = np.empty( (len(self.opt_params), rr.shape[0]) ) * np.nan
                        self.opt_params = np.vstack( (tt, rr) )
                    else:     
                        self.opt_params.append( (rr if rr is not None else np.nan) )
                else:
                    if isinstance(rr , np.ndarray):
                        self.opt_params = rr
                    else:
                        self.opt_params = [  (rr if (rr is not None) else np.nan), ]
        
            if self.opt_obj_funcs is None: # first iteration
                self.opt_obj_funcs = [me.objective_function(),] if (me.objective_function() is not None) else [np.nan,]
            else:
                self.opt_obj_funcs.append( me.objective_function() )
        
        def save(self, optim_save_path, optim_file_name, save= False):
            """
            Saves optim data to file
            """
            
            result_dict = {}
            result_dict['opt_obj_grads'] = self.opt_obj_grads
            result_dict['opt_gradients'] = self.opt_gradients
            result_dict['opt_params'] = self.opt_params
            result_dict['opt_obj_funcs'] = np.array(self.opt_obj_funcs)
            
            optim_file_name = optim_file_name + '__optim_history'
            if save:
                io.savemat(os.path.join(optim_save_path, optim_file_name), result_dict)
            
            return result_dict
            
            
    oo = observer()
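    # Registering the observer makes GPy/paramz call notif_callable on every
    # parameter update, so the whole optimization trajectory gets recorded.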
    model.add_observer( oo, oo.notif_callable )
    
    if optim == 'bfgs':
        # L-BFGS-B optimization ->
        # 'factr' - stopping criterion: stop when
        # (f_k - f_{k+1}) / max(|f_k|, |f_{k+1}|, 1) <= factr * eps,
        # where eps is the machine precision. default: 1e7
        #
        # pgtol : float, optional
        #    The iteration will stop when
        #    ``max{|proj g_i | i = 1, ..., n} <= pgtol``
        #    where ``pg_i`` is the i-th component of the projected gradient.
        # default: 1e-5
        model.optimize(optimizer ='lbfgsb', bfgs_factor = 1e12, gtol=1e-3, messages=True)
        # L-BFGS-B optimization <-
    elif  optim == 'scg':
        # SCG optimization ->
        # xtol: controls reduction in step size
        #     default: 1e-6
        # ftol: controls reduction in objective                   
        #     default: 1e-6
        # gtol: controls reduction in gradients
        #     default: 1e-5
        
        
        model.optimize(optimizer ='scg', xtol = 1e-3, ftol=1e-3,  gtol = 1e-3, messages=True)
        # SCG optimization <-
    else:
        raise ValueError("Wrong Optimizer name")
        #model.optimize(optimizer ='scg', messages=True)
    print(model)
    
    result_dict = {}
    result_dict['params'] = model.param_array[:]
    result_dict.update( oo.save('', '', save= False) )
    save_file_name = '1_ex6_new_' + p_model+ '_' + hyperparameters + '_' + optim + '_' + '_qp_bound_' +str(bound_qp_var)
    print(results_filex_prefix)
    print(save_file_name)    
    io.savemat(os.path.join(results_filex_prefix, save_file_name), result_dict)
    
    return model, oo
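
# A hedged invocation sketch matching the save-file naming at the end of the
# function: these arguments produce '1_ex6_new_ss_arno_start_bfgs__qp_bound_True',
# one of the result files discussed in Example #2.
model, history = experiment6_action1(hyperparameters='arno_start', p_model='ss',
                                     optim='bfgs', bound_qp_var=True)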
Example #9
def experiment6_action3(hyperparameters='my_ex6_optimal'):
    """
    Copies the first submission's experiment6 action 4. It was working
    improperly earlier; checking how it behaves now.
    
    'my_ex6_optimal' parameters are probably somewhat bad; they did not work OK,
    though not much experimentation was done either.
    
    'arno_optimal' - works well. Working values of max_cond_number start from
            1e+15 (1e+16 gives an error). 1e+14 is visually indistinguishable
            from the state-space run. If we decrease the condition number
            further, the variance expands; the mean is visually unaffected.
            
    """
        
    import GPy.models.ss_sparse_model as ss_sparse_model
    
    # Data ->
    data_file_path= '/home/agrigori/Programming/python/Sparse GP/CO2_data/co2_weekly_init_clean.csv'
    results_filex_prefix = '/home/agrigori/Programming/python/Sparse GP/Experiemnts/Results'
    
    data = np.loadtxt(data_file_path); 
    data = data[ np.where(data[:,1] > 0)[0] ,:] # get rid of missing values
    
    y_data = data[:,1]; y_data.shape = (y_data.shape[0],1)
    x_data = data[:,0]; x_data.shape = (x_data.shape[0],1)
    
    y_data = (y_data - np.mean(y_data)) / np.std(y_data)
    x_data = x_data - 1974
    # Data <-
    
    if hyperparameters == 'my_ex6_optimal':
        var_1_trend = 1.0
        ls_1_trend = 200
        
        per_per = 1
        per_var = 1.0 # fixed
        per_ls = 1
        var_2_quasi = 1.0
        ls_2_quasi = 1.0
        
        var_3_quasi = 1.0
        ls_3_quasi = 100.0
        
        noise_var = 0.1
    
        kernel = GPy.kern.sde_RBF(1, variance=var_1_trend, lengthscale=ls_1_trend) +\
                  GPy.kern.sde_StdPeriodic(1, variance=per_var, period = per_per, lengthscale=per_ls)*\
                  GPy. kern.sde_Matern32(1, variance=var_2_quasi, lengthscale=ls_2_quasi) +\
                  GPy. kern.sde_Matern32(1, variance=var_3_quasi, lengthscale=ls_3_quasi)
    
        kernel.mul.std_periodic.variance.fix()
        kernel.mul.std_periodic.period.fix()
        
        result_dict = io.loadmat(os.path.join(results_filex_prefix,'ex6_params'))
        params = result_dict['params'].squeeze()
        
        kernel[:] = params[0:-1]
        noise_var = params[-1]
        model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=noise_var, balance=False, p_Inv_jitter=1e-14 )
        
    elif hyperparameters == 'arno_optimal':
        model_params = io.loadmat('/home/agrigori/Programming/python/Sparse GP/solin-2014-supplement/arno_opt_params_to_python.mat')
        
        var_1_per = 1
        ls_1_per = 1 / 2 # divided by 2 because the lengthscale parameter has a different meaning in this parameterization
        period = 1    
        kernel1 = GPy.kern.sde_StdPeriodic(1, variance=var_1_per, lengthscale=ls_1_per, period=period, balance=False, approx_order = 6)
        
        var_1_per = 1 # does not change
        ls_1_per = 1
        kernel2 = GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
        
        # Short term fluctuations kernel
        var_1_per = 1
        ls_1_per = 1
        kernel3 =  GPy.kern.sde_Matern32(1, variance=var_1_per, lengthscale=ls_1_per)
        
        # RBF kernel. Not used if the parameter without_rbf = True 
        var_1_per = 1
        ls_1_per = 1
        kernel4 =  GPy.kern.sde_RBF(1, variance=var_1_per, lengthscale=ls_1_per, balance=False, approx_order = 6)    
        
        
        kernel = kernel1 * kernel2 + kernel3 + kernel4
        
        model = ss_sparse_model.SparcePrecisionGP(x_data,y_data,kernel, noise_var=1.0, balance=False, largest_cond_num=1e+14, regularization_type=2)
    
        model.param_array[:] = np.array( [ model_params['per_magnSigma2'],  model_params['per_period'], model_params['per_lengthscale']/2,
                                    1.0, model_params['quasi_per_mat32_lengthscale'],  model_params['mat32_inacc_magnSigma2'],
                                     model_params['mat32_inacc_lengthScale'], 
            model_params['rbf_magnSigma2'], model_params['rbf_lengthscale'], model_params['opt_noise'] ] )

    print(model)
    # predict ->
    years_to_predict = 8
    step = np.mean( np.diff(x_data[:,0]) )
    
    x_new = x_data[-1,0] + np.arange( step, years_to_predict,  step ); x_new.shape = (x_new.shape[0],1)
    x_new = np.vstack( (x_data, x_new)) # combine train and test data
    
    ssm_mean, ssm_var = model.predict(x_new, include_likelihood=False, largest_cond_num=1e+14, regularization_type=2)
    # predict <-
    
    plt.figure(2)
    #plt.title('Electricity Consumption Data', fontsize=30)    
    plt.plot( x_data, y_data, 'g-', label='Data',linewidth=1, markersize=5)
    plt.plot( x_new, ssm_mean, 'b-', label='Mean',linewidth=1, markersize=5)
    plt.plot( x_new, ssm_mean+np.sqrt(ssm_var), 'r--', label='Mean+std',linewidth=1, markersize=5)
    plt.plot( x_new, ssm_mean-np.sqrt(ssm_var), 'r--', label='Mean-std',linewidth=1, markersize=5)
    #plt.xlabel('Time (Hours)', fontsize=25)
    #plt.ylabel('Normalized Value', fontsize=25)
    #plt.legend(loc=2)
    plt.show()