Example #1
def profile_fitting(x,
                    y,
                    err_y=None,
                    optimize=True,
                    method='GPR',
                    kernel='SE',
                    num_dim=1,
                    debug_plots=True,
                    noiseLevel=2.,
                    **kwargs):  #sigma_max=10.0, l_min = 0.005,
    """Interpolate profiles and uncertainties over a dense grid. Also return the maximum 
    value of the smoothed data.
    
    This function can use Gaussian process regression or splines. When the former is adopted, both the 
    mean and the standard deviation of the updated profile are returned. Use the spline interpolation
    only as a cross-check.
    
    We allow the use of several GPR kernels so as to assess their performance in confidently 
    predicting profiles and assessing their uncertainty consistency. 

    Parameters
    ----------
    x : array of float
        Abscissa of data to be interpolated.
    y : array of float
        Data to be interpolated.
    err_y : array of float, optional
        Uncertainty in `y`, used to weight the fit.
    optimize : bool, optional
        Whether to optimize over the hyperparameters. Default is True.
    method : {'GPR', 'spline'}, optional
        Method to use when interpolating. Default is 'GPR' (Gaussian process
        regression). Can also use a cubic spline.
    kernel : str or gptools.Kernel, optional
        Type of kernel to use. At this stage the kernel is created internally, but in the future it
        would be better to construct it externally and pass a gptools kernel object as an argument.
        A pre-built gptools.Kernel instance is also accepted. More kernels should be added over time.
    num_dim : int, optional
        Number of dimensions of the input/output data. Default is 1.
    debug_plots : bool, optional
        Set to True to plot the data, the smoothed curve (with uncertainty) and
        the location of the peak value.
    noiseLevel : float, optional
        Multiplier applied to the mean of `err_y` to set the (fixed) level of the diagonal noise
        kernel. Default is 2.
    kwargs : dict, optional
        Additional keyword arguments used to set the hyperprior bounds for the chosen kernel.
    """
    # grid = scipy.linspace(max(0, x.min()), min(0.08, x.max()), 1000)
    #grid = scipy.linspace(x.min(), x.max(), 1000)
    grid = x
    # Create empty object for results:
    res = type('', (), {})()

    if method == 'GPR':
        # hp is the hyperprior. A product of kernels is a kernel, so the joint hyperprior is
        # just the product of hyperpriors for each of the hyperparameters of the individual
        # priors. gptools offers convenient functionalities to create joint hyperpriors.

        # Define the kernel type amongst the implemented options.
        if kernel == 'SE':
            assert len(kwargs) == 4
            hparams = hyperparams(**kwargs)
            #hparams.set_kwargs(**kwargs)
            # Defaults:
            if not hasattr(hparams, 'sigma_mean'): hparams.sigma_mean = 2.0
            if not hasattr(hparams, 'l_mean'): hparams.l_mean = 0.005
            if not hasattr(hparams, 'sigma_sd'): hparams.sigma_sd = 10.0
            if not hasattr(hparams, 'l_sd'): hparams.l_sd = 0.1

            hprior = (gptools.GammaJointPriorAlt(
                [hparams.sigma_mean, hparams.l_mean],
                [hparams.sigma_sd, hparams.l_sd]))
            k = gptools.SquaredExponentialKernel(
                #= ====== =======================================================================
                #0 sigma  Amplitude of the covariance function
                #1 l1     Length scale of the covariance function
                #= ====== =======================================================================
                # param_bounds=[(0, sigma_max), (0, 2.0)],
                hyperprior=hprior,
                initial_params=[
                    10000.0, 400000.0
                ],  # random, doesn't matter because we do random starts anyway
                fixed_params=[False] * 2)

        elif kernel == 'gibbs':
            #if num_dim == 1: assert len(kwargs) == 10
            hparams = hyperparams(**kwargs)
            # Defaults:
            if not hasattr(hparams, 'sigma_min'): hparams.sigma_min = 0.0
            if not hasattr(hparams, 'sigma_max'): hparams.sigma_max = 10.0

            if not hasattr(hparams, 'l1_mean'): hparams.l1_mean = 0.3
            if not hasattr(hparams, 'l1_sd'): hparams.l1_sd = 0.3

            if not hasattr(hparams, 'l2_mean'): hparams.l2_mean = 0.5
            if not hasattr(hparams, 'l2_sd'): hparams.l2_sd = 0.25

            if not hasattr(hparams, 'lw_mean'): hparams.lw_mean = 0.0
            if not hasattr(hparams, 'lw_sd'): hparams.lw_sd = 0.3

            if not hasattr(hparams, 'x0_mean'): hparams.x0_mean = 0.0
            if not hasattr(hparams, 'x0_sd'): hparams.x0_sd = 0.3

            hprior = (gptools.UniformJointPrior([
                (hparams.sigma_min, hparams.sigma_max),
            ]) * gptools.GammaJointPriorAlt([
                hparams.l1_mean, hparams.l2_mean, hparams.lw_mean,
                hparams.x0_mean
            ], [hparams.l1_sd, hparams.l2_sd, hparams.lw_sd, hparams.x0_sd]))

            k = gptools.GibbsKernel1dTanh(
                #= ====== =======================================================================
                #0 sigma  Amplitude of the covariance function
                #1 l1     Small-X saturation value of the length scale.
                #2 l2     Large-X saturation value of the length scale.
                #3 lw     Length scale of the transition between the two length scales.
                #4 x0     Location of the center of the transition between the two length scales.
                #= ====== =======================================================================
                initial_params=[
                    2.0, 0.5, 0.05, 0.1, 0.5
                ],  # for random_starts != 0, the initial state of the hyperparameters is not actually used
                fixed_params=[False] * 5,
                hyperprior=hprior,
            )
        elif kernel == 'matern52':
            if num_dim == 1: assert len(kwargs) == 4
            hparams = hyperparams(**kwargs)
            # Defaults:
            if not hasattr(hparams, 'sigma_mean'): hparams.sigma_mean = 2.0
            if not hasattr(hparams, 'l_mean'): hparams.l_mean = 0.005

            if not hasattr(hparams, 'sigma_sd'): hparams.sigma_sd = 10.0
            if not hasattr(hparams, 'l_sd'): hparams.l_sd = 0.1

            hprior = (gptools.GammaJointPriorAlt(
                [hparams.sigma_mean, hparams.l_mean],
                [hparams.sigma_sd, hparams.l_sd]))
            k = gptools.Matern52Kernel(  # this has 2 hyperparameters in 1D
                #= ===== ===========================================
                #0 sigma Prefactor to the kernel
                #1 l1    Length scale for first dimension
                #2 ...   More length scales for more dimensions
                #= ===== ===========================================
                hyperprior=hprior,
                initial_params=[0.5, 0.5],
                fixed_params=[False] * 2)
        elif kernel == 'RQ':  # rational quadratic
            if num_dim == 1: assert len(kwargs) == 6
            hparams = hyperparams(**kwargs)

            # Defaults:
            if not hasattr(hparams, 'sigma_mean'): hparams.sigma_mean = 2.0
            if not hasattr(hparams, 'alpha_mean'): hparams.alpha_mean = 0.005
            if not hasattr(hparams, 'l1_mean'): hparams.l1_mean = 0.005

            if not hasattr(hparams, 'sigma_sd'): hparams.sigma_sd = 10.0
            if not hasattr(hparams, 'alpha_sd'): hparams.alpha_sd = 0.1
            if not hasattr(hparams, 'l1_sd'): hparams.l1_sd = 0.1

            hprior = (gptools.GammaJointPriorAlt(
                [hparams.sigma_mean, hparams.alpha_mean, hparams.l1_mean],
                [hparams.sigma_sd, hparams.alpha_sd, hparams.l1_sd]))

            k = gptools.RationalQuadraticKernel(
                #= ===== ===========================================
                #0 sigma Prefactor to the kernel
                #1 alpha Order of kernel
                #2 l1    Length scale for first dimension
                #3 l2    Length scale for second dimension
                #4 ...   More length scales for more dimensions
                #= ===== =======================================
                hyperprior=hprior,
                initial_params=[1.0, 0.5, 1.0],
                fixed_params=[False] * 3)

        elif isinstance(kernel, gptools.Kernel):
            k = kernel
        else:
            raise ValueError('Unrecognized kernel: %s' % (kernel,))

        # Add a diagonal noise kernel (the first argument is num_dim). Here the noise level is
        # fixed at np.mean(err_y) * noiseLevel rather than optimized over.
        nk = gptools.DiagonalNoiseKernel(
            1,
            n=0,
            initial_noise=np.mean(err_y) * noiseLevel,
            fixed_noise=True
        )  #, noise_bound=(np.mean(err_y)*noiseLevel*(4.0/5.0),np.mean(err_y)*noiseLevel*(6.0/5.0)))    #(np.min(err_y), np.max(err_y)*noiseLevel))#, enforce_bounds=True)
        #print "noise_bound= [", np.min(err_y), ",",np.max(err_y)*noiseLevel,"]"

        gp = gptools.GaussianProcess(k, X=x, y=y, err_y=err_y, noise_k=nk)

        # Where the data are exactly zero, constrain the fit to pass through zero with zero slope:
        for i in range(len(y)):
            if y[i] == 0:
                gp.add_data(x[i], 0, n=0, err_y=0.0)
                gp.add_data(x[i], 0, n=0, err_y=0.0)
                gp.add_data(x[i], 0, n=0, err_y=0.0)
                gp.add_data(x[i], 0, n=1, err_y=0.0)

        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", message="invalid value encountered in subtract")
            if optimize:
                res_min, ll_trials = gp.optimize_hyperparameters(
                    verbose=False, random_starts=100)
            else:
                print('Optimization is turned off. Using initial guesses for hyperparameters!')

        m_gp, s_gp = gp.predict(grid, noise=True)
        res.free_params = gp.free_params[:]
        res.free_param_names = gp.free_param_names[:]
        res.free_param_bounds = gp.free_param_bounds[:]

        # Fraction of points that fall within 1 sd, between 1 and 2 sd, and between 2 and 3 sd:
        points_in_1sd = 0.0
        points_in_2sd = 0.0
        points_in_3sd = 0.0
        for i in range(len(y)):
            # Find value of grid that is the closest to x[i]:
            gidx = np.argmin(abs(grid - x[i]))
            if abs(m_gp[gidx] - y[i]) < s_gp[gidx]:
                points_in_1sd += 1.0
            if abs(m_gp[gidx] - y[i]) > s_gp[gidx] and abs(
                    m_gp[gidx] - y[i]) < 2 * s_gp[gidx]:
                points_in_2sd += 1.0
            if abs(m_gp[gidx] - y[i]) > 2 * s_gp[gidx] and abs(
                    m_gp[gidx] - y[i]) < 3 * s_gp[gidx]:
                points_in_3sd += 1.0

        frac_within_1sd = float(points_in_1sd) / len(y)
        frac_within_2sd = float(points_in_2sd) / len(y)
        frac_within_3sd = float(points_in_3sd) / len(y)

        ###
        print("Estimating AIC, BIC...")
        sum2_diff = 0
        for i in range(len(y)):
            # Find value of grid that is the closest to x[i]:
            gidx = np.argmin(abs(grid - x[i]))
            sum2_diff += (m_gp[gidx] - y[i])**2  # accumulate squared residuals

        chi_squared = float(sum2_diff) / len(y)  # unweighted mean squared residual
        num_params = len(hparams.__dict__) // 2
        num_data = len(y)

        AIC = chi_squared + 2.0 * num_params
        BIC = chi_squared + num_params * np.log(num_data)

    elif method == 'spline':
        m_gp = scipy.interpolate.UnivariateSpline(
            x,
            y,
            w=1.0 / err_y,
            s=None  #2*len(x)
        )(grid)
        if np.isnan(m_gp).any():
            print(x)
            print(y)
            print(err_y)
        #i = m_gp.argmax()
    else:
        raise ValueError("Undefined method: %s" % (method, ))

    if debug_plots:
        f = plt.figure()
        a = f.add_subplot(1, 1, 1)
        a.errorbar(x, y, yerr=err_y, fmt='.', color='b')
        a.plot(grid, m_gp, color='g')
        if method == 'GPR':
            gptools.univariate_envelope_plot(grid,
                                             m_gp,
                                             s_gp,
                                             ax=a,
                                             label='Inferred')
            #a.fill_between(grid, m_gp - s_gp, m_gp + s_gp, color='g', alpha=0.5)
        #plt.plot(grid[m_gp.argmax()],m_gp.max(),'r*')
        plt.xlabel('time (s)', fontsize=14)
        plt.ylabel('Signal Amplitude (A.U.)', fontsize=14)
        plt.tick_params(axis='both', which='major', labelsize=14)

    if method == 'GPR':
        res.m_gp = m_gp
        res.s_gp = s_gp
        res.frac_within_1sd = frac_within_1sd
        res.frac_within_2sd = frac_within_2sd
        res.frac_within_3sd = frac_within_3sd
        if optimize:
            res.ll = res_min.fun
            res.ll_trials = ll_trials
        res.BIC = BIC
        res.AIC = AIC
    else:
        res.m_gp = m_gp

    return res
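# A minimal usage sketch for profile_fitting (assumed call pattern; the data here are
# synthetic, and the `hyperparams` helper plus the module-level imports of numpy, scipy,
# gptools and matplotlib are taken for granted). The four keyword arguments match the
# names expected by the 'SE' branch above.
import numpy as np

x = np.linspace(0.0, 0.08, 50)                                           # hypothetical time base
y = np.exp(-((x - 0.04) / 0.01)**2) + 0.05 * np.random.randn(len(x))     # hypothetical signal
err_y = 0.05 * np.ones_like(y)                                           # hypothetical uncertainties

res = profile_fitting(x, y, err_y=err_y, method='GPR', kernel='SE',
                      sigma_mean=2.0, l_mean=0.005, sigma_sd=10.0, l_sd=0.1)
print(res.frac_within_1sd, res.AIC, res.BIC)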
Example #2
# k = gptools.SquaredExponentialKernel(1,
#                                      initial_params=[1, 0.15],
#                                      fixed_params=[False, False],
#                                      param_bounds=[(0.0, 1000.0), (0.01, 1.0)])
# k = gptools.MaternKernel(1,
#                          initial_params=[1, 3.0/2.0, 0.15],
#                          fixed_params=[False, False, False],
#                          param_bounds=[(0.0, 1000.0), (0.51, 10.0), (0.01, 1.0)])
# k = gptools.RationalQuadraticKernel(1,
#                                     initial_params=[1, 20.0, 0.15],
#                                     fixed_params=[False, False, False],
#                                     param_bounds=[(0.0, 1000.0), (0.001, 100.0), (0.01, 1.0)],
#                                     enforce_bounds=True)
k = gptools.GibbsKernel1dTanh(initial_params=[1.0, 0.1, 0.05, 0.003, 0.89],
                              fixed_params=[False, False, False, False, False],
                              param_bounds=[(0.0, 10000.0), (0.01, 10.0),
                                            (0.0001, 1.0), (0.0001, 10),
                                            (0.88, 0.91)],
                              enforce_bounds=True)

# Set noise kernel:
nk = gptools.DiagonalNoiseKernel(1,
                                 n=0,
                                 initial_noise=0.0,
                                 fixed_noise=True,
                                 noise_bound=(0.0, 10.0))

# Create and populate GP:
gp = gptools.GaussianProcess(k, noise_k=nk)
gp.add_data(R_mid_w, Te_TS_w, err_y=dev_Te_TS_w)
gp.add_data(R_mid_ETS_w, Te_ETS_w, err_y=dev_Te_ETS_w)
gp.add_data(R_mid_FRC_w, Te_FRC_w, err_y=dev_Te_FRC_w)
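# Possible continuation (not part of the original snippet): optimize the hyperparameters
# and evaluate the fitted Te profile on a dense major-radius grid. `R_star` is a
# hypothetical evaluation grid spanning the measurement locations.
import numpy as np

gp.optimize_hyperparameters(random_starts=20, verbose=False)
R_star = np.linspace(np.min(R_mid_w), np.max(R_mid_w), 200)
Te_star, dev_Te_star = gp.predict(R_star)  # posterior mean and standard deviation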
Example #3
    def _fit(self, xx, yy, ey):
        import copy
        import gptools
        from numpy import mean, gradient, arange

        # Normalize the data to order unity before fitting:
        norm = mean(abs(yy))
        yy = yy / norm
        ey = ey / norm
        print('********** GPR version 2.0 ***************')

        for kk in range(self.ntanh):

            hprior=(
                # Set a uniform prior for sigmaf
                gptools.UniformJointPrior([(0,10),])*
                # Set Gamma distribution('alternative form') for the other 4 priors of the Gibbs 1D Tanh kernel
                gptools.GammaJointPriorAlt([1.0,0.5,0.0,1.0],[0.3,0.25,0.1,0.05])
                )

            k = gptools.GibbsKernel1dTanh(
                #= ====== =======================================================================
                #0 sigmaf Amplitude of the covariance function
                #1 l1     Small-X saturation value of the length scale.
                #2 l2     Large-X saturation value of the length scale.
                #3 lw     Length scale of the transition between the two length scales.
                #4 x0     Location of the center of the transition between the two length scales.
                #= ====== =======================================================================
                initial_params=self.initial_params,
                fixed_params=[False]*5,
                hyperprior=hprior,
                )

            if kk == 0:
                nk = gptools.DiagonalNoiseKernel(1, n=0, initial_noise=mean(ey)*self.noiseLevel,
                        fixed_noise=False, noise_bound=(min(ey), max(ey)*self.noiseLevel))  #, enforce_bounds=True)
                print("noise_bound = [", min(ey), ",", max(ey)*self.noiseLevel, "]")
                ke = k
            else:  #the following is from Orso's initial implementation. Not tested on ITBs!
                nk = gptools.DiagonalNoiseKernel(1, n=0, initial_noise=gp.noise_k.params[0], fixed_noise=False)
                k1 = gptools.GibbsKernel1dTanh(
                    initial_params=copy.deepcopy(gp.k.params[-5:]),
                    fixed_params=[False]*5)
                ke+=k1

            # Create and populate GP:
            gp = gptools.GaussianProcess(ke, noise_k=nk)
            gp.add_data(xx, yy, err_y=ey)
            gp.add_data(0, 0, n=1, err_y=0.0) #zero derivative on axis

            #================= Add constraints ====================
            # Impose constraints on values in the SOL
            if self.zero_value_outside:
                gp.add_data(max([1.1, max(xx)])+0.1, 0, n=0, err_y=mean(ey))  # zero beyond edge
                gp.add_data(max([1.1, max(xx)])+0.2, 0, n=0, err_y=mean(ey))  # zero beyond edge

            # Impose constraints on derivatives in the SOL
            grad = gradient(yy, xx)  # rough estimate of gradients
            gp.add_data(max([1.1, max(xx)]), 0, n=1, err_y=max(grad)*max(ey/yy))  # added uncertainty in derivative
            print("Added {:.0f}% of max(gradient) as uncertainty on the GPR derivative constraint outside of the LCFS".format(max(ey/yy)*100))
            gp.add_data(max([1.1, max(xx)])+0.1, 0, n=1)  # zero derivative far beyond the edge

            for kk1 in range(1,3):
                if self.zero_value_outside:
                    gp.add_data(max([1.1,max(xx)])+0.1*kk1, 0, n=0, err_y=mean(ey)) #zero at edge
                gp.add_data(max([1.1,max(xx)])+0.1*kk1, 0, n=1) #zero derivative beyond the edge

            # In shots where data is missing at the edge, attempt forcing outer stabilization
            if max(xx) < 0.8:
                print("Missing data close to the edge. Fit at rho>0.8 might be rather wild.")
                if self.zero_value_outside:
                    if max(ey/yy)<0.1:
                        gp.add_data(1.0, 0, n=0, err_y=max(ey)*2)
                    else:
                        gp.add_data(1.0, 0, n=0, err_y=max(ey))
                # pad SOL with zero-derivative constraints
                for i in arange(5):
                    gp.add_data(1.05+0.02*i,0,n=1) #exact derivative=0

            #============ Optimization of hyperparameters ===========
            print('Number of random starts: ', self.random_starts)
            if kk==0:
                # Optimize hyperparameters:
                gp.optimize_hyperparameters(
                    method='SLSQP',
                    verbose=self.verbose,
                    num_proc=None,    #if 0, optimization with 1 processor in series; if None, use all available processors
                    random_starts=self.random_starts,
                    opt_kwargs={ 'bounds': (ke+nk).free_param_bounds,})

            else:
                # Optimize hyperparameters:
                gp.optimize_hyperparameters(
                    method='SLSQP',
                    verbose=self.verbose,
                    num_proc=None,
                    random_starts=self.random_starts,
                    opt_kwargs={ 'bounds': ke.free_param_bounds,},)

        gp.norm = norm
        self.inferred_params = copy.deepcopy(gp.k.params)
        self.final_noise = copy.deepcopy(gp.noise_k.params)
        print('------> self.inferred_params: ', self.inferred_params)
        print('------> self.final_noise: ', self.final_noise)
        print('------> mean(ey) = ', mean(ey))
        print('------> self.final_noise/mean(ey) = ', self.final_noise/mean(ey))
        return gp
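# Hedged usage sketch (assumed; `fitter`, `xx`, `yy` and `ey` are hypothetical): the GP
# returned by _fit was trained on data normalized by mean(abs(yy)), so predictions on a
# rho grid must be rescaled by gp.norm.
from numpy import linspace

gp = fitter._fit(xx, yy, ey)
rho_grid = linspace(0.0, 1.1, 200)
m, s = gp.predict(rho_grid)       # mean and standard deviation of the normalized profile
m, s = m * gp.norm, s * gp.norm   # undo the normalization applied inside _fit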
Example #4
a_val.plot(X_star, out['samp'], color='y', alpha=0.1)

# When the edge data are incorporated, a stationary kernel such as the SE is
# no longer appropriate:
# Either the region of rapid change will drive the fit to short values, or the
# gradual region will cause the rapid change to be oversmoothed.
# :py:class:`GibbsKernel1dTanh` was designed to fit nonstationary data like
# this where there is a smooth region and a rough region. An arbitrary length
# scale function can be selected by following this template. In addition, more
# powerful input warpings are provided in the :py:mod:`warping` submodule. See
# the manual for more details.
hp = (
    gptools.UniformJointPrior([[0.0, 20.0]]) *
    gptools.GammaJointPriorAlt([1.0, 0.5, 0.0, 1.0], [0.3, 0.25, 0.1, 0.1])
)
k_gibbs = gptools.GibbsKernel1dTanh(hyperprior=hp)
gp = gptools.GaussianProcess(k_gibbs)
gp.add_data(core_data['X'], core_data['y'], err_y=core_data['err_y'])
gp.add_data(edge_data['X'], edge_data['y'], err_y=edge_data['err_y'])
gp.add_data(0, 0, n=1)
#gp.optimize_hyperparameters(verbose=True)
y_star, std_y_star = gp.predict(X_star)
gptools.univariate_envelope_plot(
    X_star,
    y_star,
    std_y_star,
    label='whole profile, Gibbs+tanh kernel',
    ax=a_val
)
grad_y_star, std_grad_y_star = gp.predict(X_star, n=1)
gptools.univariate_envelope_plot(
    X_star,
    grad_y_star,
    std_grad_y_star,
    label='whole profile gradient, Gibbs+tanh kernel'
)
Example #5
import setupplots
setupplots.thesis_format()
import scipy
import matplotlib.pyplot as plt
plt.ion()
import gptools

X = scipy.atleast_2d(scipy.linspace(0.0, 1.5, 400)).T
zeros = scipy.zeros_like(X)
ones = scipy.ones_like(X)

xi_vals = [0.0, 0.5, 1.0, 1.2]

# Gibbs+tanh:
f = plt.figure(figsize=(setupplots.TEXTWIDTH, setupplots.TEXTWIDTH / 1.618))
k = gptools.GibbsKernel1dTanh(initial_params=[1.0, 1.0, 0.5, 0.02, 1.0])
for i, xi in enumerate(xi_vals):
    k_00 = k(zeros + xi, X, zeros, zeros)
    k_01 = k(zeros + xi, X, zeros, ones)
    k_10 = k(zeros + xi, X, ones, zeros)
    k_11 = k(zeros + xi, X, ones, ones)
    l = k.l_func(X.ravel(), 0, *k.params[1:])

    a = f.add_subplot(2, 2, i + 1)
    c, = a.plot(X.ravel(), k_00, label='$n_i=0$, $n_j=0$', lw=3)
    a.axvline(xi, color=c.get_color())
    a.plot(X.ravel(), k_01, '--', label='$n_i=0$, $n_j=1$')
    a.plot(X.ravel(), k_10, '--', label='$n_i=1$, $n_j=0$')
    a.plot(X.ravel(), k_11, '--', label='$n_i=1$, $n_j=1$')
    a.plot(X.ravel(), l, ':', label=r'$\ell(x)$', lw=3)
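# Assumed finishing touches (not part of the original snippet): add a legend to the last
# panel and tidy the subplot layout before viewing or saving the figure.
a.legend(loc='best', fontsize=8)
f.tight_layout()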