Example No. 1
def GP(kernel, kernel_params, white=False):
    '''
    Construct a `george.GP` object for the requested kernel.

    `kernel` is 'Basic' (Matern-3/2 with amplitude `a` and time scale `t`)
    or 'QuasiPeriodic' (ExpSine2 with gamma `g` and period `p`).
    `kernel_params` packs the corresponding values, starting with the
    white-noise amplitude `w`, which is only used when `white` is True.
    '''

    if kernel == 'Basic':
        w, a, t = kernel_params
        if white:
            if OLDGEORGE:
                return george.GP(
                    WhiteKernel(w**2) + a**2 * Matern32Kernel(t**2))
            else:
                return george.GP(a**2 * Matern32Kernel(t**2),
                                 white_noise=np.log(w**2),
                                 fit_white_noise=True)
        else:
            return george.GP(a**2 * Matern32Kernel(t**2))
    elif kernel == 'QuasiPeriodic':
        w, a, g, p = kernel_params
        if white:
            if OLDGEORGE:
                return george.GP(
                    WhiteKernel(w**2) + a**2 * ExpSine2Kernel(g, p))
            else:
                return george.GP(a**2 * ExpSine2Kernel(g, p),
                                 white_noise=np.log(w**2),
                                 fit_white_noise=True)
        else:
            return george.GP(a**2 * ExpSine2Kernel(g, p))
    else:
        raise ValueError('Invalid value for `kernel`.')
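
# Hedged usage sketch (not part of the original example): one way the factory above
# might be driven, with made-up hyperparameters and toy data. It assumes numpy,
# george and Matern32Kernel are imported, as the snippet itself requires.
x = np.linspace(0., 10., 50)
y = np.sin(x) + 0.1 * np.random.randn(50)

# For kernel='Basic', kernel_params = (w, a, t): white-noise level, amplitude, time scale.
gp = GP('Basic', (0.1, 1.0, 2.0), white=False)
gp.compute(x, yerr=0.1)       # factorise the covariance including the data uncertainties
mu, cov = gp.predict(y, x)    # posterior mean and covariance at the training points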
Example No. 2
def GP(kernel, kernel_params, white=False):
    if kernel == 'Basic':
        w, a, t = kernel_params
        if white:
            return george.GP(WhiteKernel(w ** 2) + a ** 2 * Matern32Kernel(t ** 2))
        else:
            return george.GP(a ** 2 * Matern32Kernel(t ** 2))
    elif kernel == 'QuasiPeriodic':
        w, a, g, p = kernel_params
        if white:
            return george.GP(WhiteKernel(w ** 2) + a ** 2 * ExpSine2Kernel(g, p))
        else:
            return george.GP(a ** 2 * ExpSine2Kernel(g, p))
    else:
        raise ValueError('Invalid value for `kernel`.')
Example No. 3
def create_fake_data(add_sin=False):
    # fake 1D lightcurve.
    # data is a constant, plus an airmass term, plus a random cloud
    x = np.linspace(0, 8, 101)

    # model cloud using Gaussian Process
    kernel = 2 * Matern32Kernel(1.0)
    gp = george.GP(kernel)
    gp.compute(x, 0.1 * np.ones_like(x))
    cloud = np.fabs(gp.sample(x))

    # const + airmass + cloud
    ybase = 20.0 + 5 * np.sin(np.pi * x / 12.) - cloud
    #plt.plot(ybase)
    #plt.show()

    # stack many times to get 2D array [stars,hours]
    nstars = 20
    data = np.repeat(ybase[:, np.newaxis], nstars, axis=1).T

    # add gaussian noise (amplitude around 1)
    data += np.random.normal(loc=0, scale=0.8, size=data.shape)

    # add sine wave to star 11 if required
    if add_sin:
        data[10, :] += 40.0 * np.sin(2.0 * np.pi * x / 3.5)

    return data
Example No. 4
def plot_gp_one_season(data):
    """
    Plot DES light curve as a single season.

    Parameters
    ----------
    data : `pandas.DataFrame`
        DataFrame object containing a full DES light curve. This should be
        obtained through [NAME OF LOCAL QUERY FUNC] or [NAME OF EASYACCESS
        WRAPPER]

    Returns
    -------
    fig : `plt.figure`
        PyPlot canvas object. If one was originally provided as a parameter,
        the original object is returned; otherwise a new object is created.
    """
    flux_arr = []

    gdf = data.groupby('band')
    for group, obs in gdf:
        plt.errorbar(obs['mjd'],
                     obs['flux'],
                     yerr=obs['fluxerr'],
                     fmt='o',
                     c=band_colour(group),
                     label=group)

        flux_norm = obs['flux'].mean()
        obs['flux'] /= flux_norm
        obs['fluxerr'] /= flux_norm

        gp = george.GP(Matern32Kernel(np.exp(10)))
        gp.compute(obs['mjd'], obs['fluxerr'])
        p0 = gp.kernel.vector
        opt.minimize(ll, p0, jac=grad_ll, args=(gp, obs))

        t = np.linspace(data['mjd'].min(), data['mjd'].max(), 500)
        mu, cov = gp.predict(obs['flux'], t)
        std = np.sqrt(np.diag(cov))

        mu *= flux_norm
        std *= flux_norm
        flux_arr.append(mu)

        plt.fill_between(t,
                         mu + std,
                         mu - std,
                         color=band_colour(group),
                         alpha=0.4)
        plt.plot(t, mu, color=band_colour(group))

    plt.tight_layout()
    plt.legend(fontsize=16, loc='best')
Example No. 5
def gp_des_lc(data):
    df = pd.DataFrame({
        'mjd': [],
        'flux': [],
        'season': [],
        'band': [],
        'snid': []
    })

    gdf = data.groupby(['season', 'band'])
    for group, obs in gdf:
        mask = ((edges['season'] == group[0]) &
                (edges['field'] == obs['field'].values[0]))
        edge_query = edges[mask]
        min_edge = edge_query['min_mjd'].values[0]

        if (int(obs['mjd'].min()) > min_edge or int(obs['mjd'].max()) <
            (min_edge + 149)):
            print(min_edge, min_edge + 149)
            print(obs['mjd'].min(), obs['mjd'].max())
            return

        flux_norm = obs['flux'].mean()
        obs['flux'] /= flux_norm
        obs['fluxerr'] /= flux_norm

        if flux_norm == 0.0000:
            print('Flux average is 0')
            return

        gp = george.GP(Matern32Kernel(np.exp(10)))
        gp.compute(obs['mjd'], obs['fluxerr'])
        p0 = gp.kernel.vector
        opt.minimize(ll, p0, jac=grad_ll, args=(gp, obs))

        # t = np.linspace(0, 149, 23) + min_edge
        t = np.linspace(0, 149, 46) + min_edge
        try:
            mu, cov = gp.predict(obs['flux'], t)
        except Exception:
            print('Could not interpolate LC')
            return

        mu *= flux_norm
        mu *= (10**1.56)
        temp_df = pd.DataFrame({'mjd': t, 'flux': mu})
        temp_df['season'] = group[0]
        temp_df['band'] = group[1]
        temp_df['snid'] = obs['snid'].values[0]

        df = pd.concat((df, temp_df))

    df.to_sql('real_nonia_interp_46', engine, if_exists='append', index=False)
Example No. 6
    def __init__(self, lc, dist_factor=10.0, time_factor=0.1, tau_frac=0.25):
        self.time = lc.time
        mu = np.median(lc.flux)
        self.flux = lc.flux / mu - 1.0
        self.ferr = lc.ferr / mu

        # Convert to parts per thousand.
        self.flux *= 1e3
        self.ferr *= 1e3

        # Estimate the kernel parameters.
        tau = tau_frac * estimate_tau(self.time, self.flux)
        self.kernel = np.var(self.flux) * Matern32Kernel(tau**2)
        self.gp = george.GP(self.kernel, solver=george.HODLRSolver)
        self.K_0 = self.gp.get_matrix(self.time)
        self.gp.compute(self.time, self.ferr, seed=1234)
        self.alpha = self.gp.solver.apply_inverse(self.flux)

        # Compute the likelihood of the null model.
        self.ll0 = self.lnlike()
Example No. 7
    # vae = load_model(ModelDir + 'fullAE_' + fileOut + '.hdf5')
    encoder = load_model(ModelDir + 'EncoderP' + str(num_para) + ClID + '_' +
                         fileOut + '.hdf5')
    decoder = load_model(ModelDir + 'DecoderP' + str(num_para) + ClID + '_' +
                         fileOut + '.hdf5')
    history = np.loadtxt(ModelDir + 'TrainingHistoryP' + str(num_para) + ClID +
                         '_' + fileOut + '.txt')

import george
from george.kernels import Matern32Kernel  # , ConstantKernel, WhiteKernel, Matern52Kernel

# kernel = ConstantKernel(0.5, ndim=num_para) * Matern52Kernel(0.9, ndim=num_para) + WhiteKernel( 0.1, ndim=num_para)
#kernel = Matern32Kernel(1000, ndim=num_para)
# kernel = Matern32Kernel( [1000,2000,2000,1000,1000], ndim=num_para)
kernel = Matern32Kernel([1000, 4000, 3000, 1000, 2000], ndim=num_para)
#kernel = Matern32Kernel( [1,0.5,1,1.4,0.5], ndim=num_para)

#kernel = Matern32Kernel(ndim=num_para)

# This kernel (and more importantly its subclasses) computes
# the distance between two samples in an arbitrary metric and applies a radial function to this distance.
# metric: The specification of the metric. This can be a float, in which case the metric is considered isotropic
# with the variance in each dimension given by the value of metric.
# Alternatively, metric can be a list of variances for each dimension. In this case, it should have length ndim.
# The fully general, non-axis-aligned metric hasn't been implemented yet.
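
# For illustration (not in the original script): the two metric forms described
# above, shown for a hypothetical 5-dimensional parameter space.
iso_kernel = Matern32Kernel(1000.0, ndim=5)                                  # one shared variance (isotropic)
aniso_kernel = Matern32Kernel([1000., 4000., 3000., 1000., 2000.], ndim=5)   # one variance per dimension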

# PLOTTING y_train and y_test

# import pandas as pd
# plt.figure(431)
Example No. 8
        def mangle_spectrum_function(self, show=False):
            if (not hasattr(self, 'mangledspec_path')):
                self.create_mangledspec_folder()
            elif (not hasattr(self, 'raw_spec')):
                self.load_raw_spec()

            ratios = []
            ratios_err = []
            fitted_phot_list = []
            fitted_photerr_list = []
            wls_eff = []
            used_filters = []

            outMJD_ratios = []
            outMJD_ratios_err = []
            outMJD_fitted_phot_list = []
            outMJD_fitted_photerr_list = []
            outMJD_wls_eff = []
            outMJD_used_filters = []

            outwls_filters_wls_eff = []
            outwls_filters = []
            outwls_fitted_phot_list = []
            outwls_fitted_photerr_list = []

            for filt in self.avail_filters:
                fitted_phot = self.phot4mangling['%s_fitflux' % filt].values[0]
                fitted_phot_err = self.phot4mangling['%s_fitfluxerr' %
                                                     filt].values[0]
                inMJDrange_pnt = self.phot4mangling['%s_inrange' %
                                                    filt].values[0]

                lam_avg, lam_eff, raw_phot, raw_phot_err, min_wls, max_wls = self.band_flux(
                    filt, type_spec2use='raw_spec')

                # same acceptance condition for every filter
                condition = ((lam_avg > min(self.raw_spec['wls'])) &
                             (lam_avg < max(self.raw_spec['wls'])) &
                             (fitted_phot > 0.))
                if (not inMJDrange_pnt):
                    outMJD_fitted_phot_list.append(fitted_phot)
                    outMJD_fitted_photerr_list.append(fitted_phot_err)
                    outMJD_ratios.append(fitted_phot / raw_phot)
                    outMJD_ratios_err.append(
                        prop_err_div(fitted_phot, raw_phot, fitted_phot_err,
                                     0.))
                    outMJD_wls_eff.append(lam_eff)
                    outMJD_used_filters.append(filt)
                else:
                    if condition:
                        fitted_phot_list.append(fitted_phot)
                        fitted_photerr_list.append(fitted_phot_err)
                        ratios.append(fitted_phot / raw_phot)
                        ratios_err.append(
                            prop_err_div(fitted_phot, raw_phot,
                                         fitted_phot_err, 0.))
                        wls_eff.append(lam_eff)
                        used_filters.append(filt)
                    else:
                        if (fitted_phot > 0.):
                            outwls_filters.append(filt)
                            outwls_filters_wls_eff.append(lam_avg)
                            outwls_fitted_phot_list.append(fitted_phot)
                            outwls_fitted_photerr_list.append(fitted_phot_err)
            ratios = np.array(ratios)
            wls_eff = np.array(wls_eff)
            ratios_err = np.array(ratios_err)
            used_filters = np.array(used_filters)

            if len(ratios) < 1:
                print('Impossible to mangle ', self.spec_file, 'used',
                      used_filters, '\nUnused', outMJD_used_filters)
                return None
            else:
                if len(self.raw_spec['wls']) > 10**4:
                    # GPs struggle with such a large number of points
                    int_fraction = int(len(self.raw_spec['wls']) / 5000.)
                    print(
                        'This spectrum has a huge number of data points (%i); keeping only every %i-th of them'
                        % (len(self.raw_spec['wls']), int_fraction))
                    full_wls = self.raw_spec['wls'][::int_fraction]
                else:
                    full_wls = self.raw_spec['wls']
                norm_wls = np.median(full_wls)
                full_wls_normed = full_wls / norm_wls
                wls_eff_normed = np.array(wls_eff) / norm_wls

                offset = 1.
                norm = np.mean(ratios)
                ratios_normed = np.array(ratios) / norm - offset
                ratios_err_normed = np.array(ratios_err) / norm

                def ll(p):
                    gp.set_parameter_vector(p)
                    scale = np.exp(
                        gp.get_parameter_dict()['kernel:k2:metric:log_M_0_0'])
                    if scale < 0.09:
                        return np.inf
                    else:
                        return -gp.lnlikelihood(ratios_normed, quiet=False)  #

                def grad_ll(p):
                    gp.set_parameter_vector(p)
                    return -gp.grad_lnlikelihood(ratios_normed, quiet=False)

                k = np.var(ratios_normed) * Matern32Kernel(0.2)

                wls_eff_normedT = np.atleast_2d(wls_eff_normed).T
                gp = george.GP(k)
                gp.compute(wls_eff_normedT, (ratios_err_normed))
                p0 = gp.get_parameter_vector()

                results = opt.minimize(ll, p0, jac=grad_ll)
                # print ('SCALE', np.exp(results.x))
                # print ('#######', wls_eff,'\n', ratios,'\n', ratios_err,'\n', ratios_normed,'\n', norm)
                gp.set_parameter_vector(results.x)
                mu, cov = gp.predict(ratios_normed, full_wls_normed)
                std = np.sqrt(np.diag(cov))

                spec_number = self.phot4mangling.spec_mjd.values[
                    0]  # self.phot4mangling.index.values[0]

                fig = plt.figure(1, figsize=(14, 6))
                plt.rc('font', family='serif')
                plt.rc('xtick', labelsize=13)
                plt.rc('ytick', labelsize=13)
                ax1 = plt.subplot2grid((5, 1), (0, 0), rowspan=2)
                for f, w, r, rerr in zip(used_filters, wls_eff, ratios,
                                         ratios_err):
                    if 'swift' in f:
                        flabel = f.split('_')[1] + '(Swift)'
                    else:
                        flabel = f.split('_')[1]
                    ax1.errorbar(w, r, yerr=rerr, marker=mark_dict[f], ms=8,
                                 mfc=color_dict[f], mec=color_dict[f], linestyle='None', \
                                 ecolor=color_dict[f], label='%s' % flabel)
                ax1.errorbar(full_wls,
                             norm * (mu + offset),
                             color='orange',
                             label='Mangling\nfunction')
                ax1.fill_between(full_wls,
                                 norm * ((mu + offset) - std),
                                 norm * ((mu + offset) + std),
                                 color='orange',
                                 alpha=0.3)
                ax1.set_ylabel('Photometric Flux/\nSynthetic Flux',
                               fontsize=13)

                ax1.set_title('Example of mangling using a spectrum of %s' %
                              self.snname,
                              fontsize=15)

                ax1.set_ylim(min(ratios) * (0.9), max(ratios) * (1.1))
                ax1.set_xlim(1600., 10300.)
                plt.tick_params(axis='x', labelbottom=False)
                ax1.legend(ncol=2, fontsize=12, loc='best')
                if len(self.raw_spec['wls']) > 10**4:
                    # GPs struggle with such a large number of points
                    mu_full = np.interp(self.raw_spec['wls'],
                                        self.raw_spec['wls'][::int_fraction],
                                        mu)
                    std_full = np.interp(self.raw_spec['wls'],
                                         self.raw_spec['wls'][::int_fraction],
                                         std)
                else:
                    mu_full = mu
                    std_full = std

                mangled_spec = self.raw_spec['flux'] * norm * (mu_full +
                                                               offset)
                mangled_spec_err = (
                    (norm * (std_full) * self.raw_spec['flux'])**2 +
                    ((norm *
                      (mu_full + offset)) * self.raw_spec['fluxerr'])**2)**0.5

                self.mangled_spec = np.array([
                    a for a in zip(self.raw_spec['wls'], mangled_spec,
                                   mangled_spec_err)
                ],
                                             dtype=[('wls', '<f8'),
                                                    ('flux', '<f8'),
                                                    ('fluxerr', '<f8')])
                self.fitted_phot_dict = {
                    'eff_wls': wls_eff,
                    'fitted_phot': fitted_phot_list,
                    'fitted_phot_err': fitted_photerr_list,
                    'used_filters': used_filters
                }
                self.mangling_mask = norm * (mu_full + offset)
                mangled_phot_list = []
                for filt in used_filters:
                    lam_eff, mangled_phot = self.band_flux(
                        filt, type_spec2use='mangled')
                    mangled_phot_list.append(mangled_phot)
                self.magled_photometry_dict = {
                    'eff_wls': wls_eff,
                    'fitted_phot': mangled_phot_list,
                    'used_filters': used_filters
                }

                ax2 = plt.subplot2grid((5, 1), (2, 0), rowspan=3)
                ax2.errorbar(self.mangled_spec['wls'],
                             self.mangled_spec['flux'],
                             lw=0.9,
                             color='k',
                             label='Mangled Spectrum')
                mask_neg = self.mangled_spec['flux'] < 0.
                ax2.errorbar(self.mangled_spec['wls'][mask_neg],
                             self.mangled_spec['flux'][mask_neg],
                             lw=3.9,
                             color='m',
                             label='Mangled Spectrum')
                ax2.errorbar(self.raw_spec['wls'],
                             self.raw_spec['flux'] * norm,
                             lw=0.6,
                             color='r',
                             alpha=1,
                             label='Uncalibrated Spectrum\n+ Offset')
                ax2.errorbar(np.concatenate([self.fitted_phot_dict['eff_wls'], outwls_filters_wls_eff]),
                             np.concatenate([self.fitted_phot_dict['fitted_phot'], outwls_fitted_phot_list]), \
                             yerr=np.concatenate([self.fitted_phot_dict['fitted_phot_err'],
                                                  outwls_fitted_photerr_list]),
                             marker='o', mfc='grey', mec='grey', ms=7,
                             ecolor='grey', linestyle='None', label='Photometric flux\nfrom LC fitting')

                ax2.errorbar(self.magled_photometry_dict['eff_wls'],
                             self.magled_photometry_dict['fitted_phot'],
                             marker='^',
                             mfc='r',
                             mec='r',
                             linestyle='None',
                             ms=7,
                             label='Flux synthesized\nfrom mangled spec')

                ax2.fill_between(
                    self.mangled_spec['wls'],
                    self.mangled_spec['flux'] - self.mangled_spec['fluxerr'],
                    self.mangled_spec['flux'] + self.mangled_spec['fluxerr'],
                    color='grey',
                    alpha=0.3)

                ax2.set_ylabel(
                    r'Flux ($\mathrm{erg}$ $\mathrm{s^{-1} cm^{-2}} \mathrm{\AA} $)',
                    fontsize=13)
                ax2.set_xlabel(r'Wavelength ($\mathrm{\AA}$)', fontsize=13)
                ax2.set_xlim(1600., 10300.)
                ax2.legend(ncol=1, fontsize=13, loc='upper right')
                plt.subplots_adjust(hspace=0.3, wspace=0.1)
                fig.savefig(self.mangledspec_path +
                            '%.2f_mangled_spec.pdf' % spec_number,
                            bbox_inches='tight')
                if show: plt.show()
                plt.close(fig)
                self.save_mangled_spectrum()
                return {'wls': self.raw_spec['wls'], 'flux': mangled_spec}
Example No. 9
def run_2DGP_GRID(GP2DIM_Class, y_data_nonan, y_data_nonan_err, x1_data_norm, x2_data_norm,
                  kernel_wls_scale, kernel_time_scale, extrap_mjds, prior=False,
                  points=np.nan, values=np.nan):
    """
    ## for NUV extension:   extrap_mjds = grid_ext_columns
    ## for spectra augmentation:
        extrap_mjds = grid_ext.columns.values
        if len(extrap_mjds) > 200:
            extrap_mjds = grid_ext.columns.values[:200]
        if max(extrap_mjds - min(extrap_mjds)) > 200:
            extrap_mjds = extrap_mjds[extrap_mjds - min(extrap_mjds) < 200]

        tot_iteration = int(len(extrap_mjds) / slot_size + 1)
        print(tot_iteration)
    """

    # TRAINING: X, y, terr
    norm1 = GP2DIM_Class.grid_norm_info['norm1']
    norm2 = GP2DIM_Class.grid_norm_info['norm2']

    if prior:
        from george.modeling import Model

        class Model_2dim(Model):
            parameter_names = ()

            def get_value(self, t):
                points_eval = np.array([tup for tup in zip(t[:, 0], t[:, 1])])
                grid_z1 = griddata(points,
                                   values,
                                   points_eval,
                                   method='nearest')
                grid_z1[np.isnan(grid_z1)] = 0.
                #plt.plot(t[:,0]*norm1, grid_z1, '-b', label='PRIOR')
                return grid_z1

        mean_model = Model_2dim()

    X = np.vstack((x1_data_norm, x2_data_norm)).T
    y = y_data_nonan
    yerr = y_data_nonan_err

    kernel_mix = Matern32Kernel([kernel_wls_scale, kernel_time_scale], ndim=2)
    kernel2dim = np.var(y) * kernel_mix  #+ 0.3*np.var(y)*kernel2*kernel1

    if prior:
        gp = george.GP(
            kernel2dim,
            mean=mean_model)  #, fit_mean=True, fit_white_noise=True)
    else:
        gp = george.GP(kernel2dim)

    gp.compute(X, yerr)

    wls_normed_range = np.sort(
        np.concatenate((np.arange(1600., 3000., 40), np.arange(
            3000., 9000., 10), np.arange(
                9000., 10350., 40)))) / GP2DIM_Class.grid_norm_info['norm1']

    mu_fill_resh = []
    std_fill_resh = []

    slot_size = 3
    tot_iteration = int(len(extrap_mjds) / slot_size + 1)

    for j in range(int(len(extrap_mjds) / slot_size + 1)):
        mjd_normed_range = ((extrap_mjds[j * slot_size:(j + 1) * slot_size]) -
                            GP2DIM_Class.grid_norm_info['offset2']
                            ) / GP2DIM_Class.grid_norm_info['norm2']
        x1_fill = []  #np.random.permutation(np.linspace(0,1., N))
        x2_fill = []  #np.random.permutation(np.linspace(0,1., N))
        for i in wls_normed_range:
            for k in mjd_normed_range:
                x1_fill.append(i)
                x2_fill.append(k)

        x1_fill = np.array(x1_fill)
        x2_fill = np.array(x2_fill)

        X_fill = np.vstack((x1_fill, x2_fill)).T
        if GP2DIM_Class.verbose:
            print(j, 'of', int(len(extrap_mjds) / slot_size + 1))
        frac_tot_iteration = int(20. * (j + 1) / tot_iteration)
        #print('[','*'*frac_tot_iteration,' '*(20-frac_tot_iteration),']' + ' %i of %i'%(slot_size*(j+1),slot_size*tot_iteration)+' spec extrapolated', end='\r')
        mu_iter, cov_iter = (gp.predict(y, X_fill, return_cov=True))
        std_iter = np.sqrt(np.diag(cov_iter))
        count = 0
        for mj in mjd_normed_range:
            fig = plt.figure(figsize=(8, 2))
            plt.subplot(1, slot_size, count + 1)
            plt.plot(norm1 * x1_fill[x2_fill == mj],
                     mu_iter[x2_fill == mj],
                     '-k',
                     label='PREDICTION')
            if prior:
                points_eval = np.array([
                    tup for tup in zip(x1_fill[x2_fill == mj], x2_fill[x2_fill
                                                                       == mj])
                ])
                grid_z1 = griddata(points,
                                   values,
                                   points_eval,
                                   method='nearest')
                grid_z1[np.isnan(grid_z1)] = 0.
                plt.plot(norm1 * x1_fill[x2_fill == mj],
                         grid_z1,
                         '-b',
                         label='PRIOR')
            plt.legend()
            plt.show()
            plt.close(fig)
            count = count + 1

        mu_resh_iter = mu_iter.reshape(len(wls_normed_range),
                                       len(mjd_normed_range))
        std_resh_iter = std_iter.reshape(len(wls_normed_range),
                                         len(mjd_normed_range))

        if mu_fill_resh == []:
            mu_fill_resh = np.copy(mu_resh_iter)
            std_fill_resh = np.copy(std_resh_iter)
        else:
            mu_fill_resh = np.concatenate([mu_fill_resh, mu_resh_iter], axis=1)
            std_fill_resh = np.concatenate([std_fill_resh, std_resh_iter],
                                           axis=1)

    print(
        '[', '*' * frac_tot_iteration, ' ' * (20 - frac_tot_iteration),
        ']' + '%i of %i' % (slot_size * (j + 1), slot_size * tot_iteration) +
        'spec extrapolated')
    mu_fill = mu_fill_resh.reshape(len(wls_normed_range) * len(extrap_mjds))
    std_fill = std_fill_resh.reshape(len(wls_normed_range) * len(extrap_mjds))

    mjd_normed_range = (extrap_mjds - GP2DIM_Class.grid_norm_info['offset2']
                        ) / GP2DIM_Class.grid_norm_info['norm2']

    x1_fill = []  #np.random.permutation(np.linspace(0,1., N))
    x2_fill = []  #np.random.permutation(np.linspace(0,1., N))
    for i in wls_normed_range:
        for k in mjd_normed_range:
            x1_fill.append(i)
            x2_fill.append(k)

    x1_fill = np.array(x1_fill)
    x2_fill = np.array(x2_fill)

    print('EXTENDING SPECTRA BETWEEN:')
    print('WLS:', min(x1_fill * GP2DIM_Class.grid_norm_info['norm1']),
          max(x1_fill * GP2DIM_Class.grid_norm_info['norm1']))
    print('MJD:', min(x2_fill * GP2DIM_Class.grid_norm_info['norm2']),
          max(x2_fill * GP2DIM_Class.grid_norm_info['norm2']))

    return (x1_fill, x2_fill, mu_fill, std_fill)
Example No. 10
        def GP_interpolation_mangle(self,
                                    wls_eff,
                                    ratios,
                                    ratios_err,
                                    min_scale,
                                    optimization=True):
            if len(self.ext_spec['wls']) > 10**4:
                # GPs struggle with such a large number of points
                int_fraction = int(len(self.ext_spec['wls']) / 5000.)
                print(
                    'This spectrum has a huge number of data points (%i); keeping only every %i-th of them'
                    % (len(self.ext_spec['wls']), int_fraction))
                full_wls = self.ext_spec['wls'][::int_fraction]
            else:
                full_wls = self.ext_spec['wls']

            norm_wls = np.median(full_wls)
            full_wls_normed = full_wls / norm_wls
            wls_eff_normed = np.array(wls_eff) / norm_wls

            offset = 1.
            norm = np.mean(ratios)
            ratios_normed = np.array(ratios) / norm - offset
            ratios_err_normed = np.array(ratios_err) / norm

            if len(ratios_normed) < 1:
                return np.ones(len(full_wls_normed)) * np.nan, np.ones(
                    len(full_wls_normed)) * np.nan
            else:

                def ll(p):
                    # print (np.exp(p))
                    if (np.exp(p)[1] < 5 * 10**-3):  # |(np.exp(p)[1]>10**5):
                        return np.inf
                    else:
                        gp.set_parameter_vector(p)
                        return -gp.lnlikelihood(ratios_normed, quiet=False)  #

                def grad_ll(p):
                    gp.set_parameter_vector(p)
                    return -gp.grad_lnlikelihood(ratios_normed, quiet=False)

                k = np.var(ratios_normed) * Matern32Kernel(0.3)
                wls_eff_normedT = np.atleast_2d(wls_eff_normed).T
                gp = george.GP(k)
                gp.compute(wls_eff_normedT, (ratios_err_normed))
                if optimization:
                    try:
                        p0 = gp.get_parameter_vector()
                        results = opt.minimize(ll, p0, jac=grad_ll)
                        print('SCALE:', '%.4f' % np.exp(results.x[1]))
                    except Exception:
                        print('*** GP optimization failed ***' * 10)
                # print ('results', np.exp(results.x))
                mu, cov = gp.predict(ratios_normed, full_wls_normed)
                std = np.sqrt(np.diag(cov))

                if len(self.ext_spec['wls']) > 10**4:
                    # GPs struggle with such a large number of points
                    mu_full = np.interp(self.ext_spec['wls'],
                                        self.ext_spec['wls'][::int_fraction],
                                        mu)
                    std_full = np.interp(self.ext_spec['wls'],
                                         self.ext_spec['wls'][::int_fraction],
                                         std)
                else:
                    mu_full = mu
                    std_full = std
                return norm * (mu_full + offset), np.abs(norm * (std_full))
Example No. 11
def main():
    """
	Meat of the code

	"""
    insn = sys.argv[1]
    inband = sys.argv[2]
    par = np.loadtxt(pt + 'tmax_dm15.dat', dtype=str)
    lc = read_inp_lc('../files/SDSS_SN00013689.DAT')
    print(lc[0])
    #return 0
    lc = np.loadtxt('../files/SN13689_data.DAT')
    mag1 = lc[:, 1]
    ph1 = lc[:, 0]
    mag = lc[:, 1]
    magerr = lc[:, 2]
    magerr /= max(mag)
    mag /= max(mag)

    # Set up the Gaussian process.
    param = float(sys.argv[3])
    kernel = RationalQuadraticKernel(param, 100) + Matern32Kernel(
        param)  #ExpSquaredKernel(param)
    gp = george.GP(kernel)

    #lc=set_arr(insn, inband)

    def nll(p):
        # Update the kernel parameters and compute the likelihood.
        gp.kernel[:] = p
        ll = gp.lnlikelihood(mag, quiet=True)

        # The scipy optimizer doesn't play well with infinities.
        return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.

    def grad_nll(p):
        # Update the kernel parameters and compute the likelihood.
        gp.kernel[:] = p
        return -gp.grad_lnlikelihood(mag, quiet=True)

    #ph=lc['MJD']-tbmax
    #condition for second maximum
    ##TODO: GUI for selecting region
    #cond=(ph>=10.0) & (ph<=40.0)
    #define the data in the region of interest
    """
	ph1=ph[cond]
	
	mag=lc[inband][cond]

	magerr=lc['e_'+inband][cond]
	"""

    print "Fitting with george"

    #print max(mag)

    # Pre-compute the factorization of the matrix.
    gp.compute(ph1, magerr)
    print(gp.lnlikelihood(mag), gp.grad_lnlikelihood(mag))

    gp.compute(ph1, magerr)
    if sys.argv[4] == 'mle':
        p0 = gp.kernel.vector
        results = op.minimize(nll, p0, jac=grad_nll)

        gp.kernel[:] = results.x

    print(gp.lnlikelihood(mag), gp.kernel.value)
    #vertically stack it for no apparent reason
    arr = np.vstack([ph1, mag, magerr]).T

    #define x array for applying the fit
    t = np.linspace(ph1.min(), ph1.max(), 500)

    #print t.min()#gp.lnlikelihood(mag)
    mu, cov = gp.predict(mag, t)

    #condition for peak
    mpeak = (mu == min(mu))

    #calculate the standard deviation from covariance matrix
    std = np.sqrt(np.diag(cov))

    #as a check for parameters, print the array at max out
    print(t[mpeak][0], max(mu), std[mpeak][0])

    #return 0
    arr = np.vstack([t, mu, std]).T

    np.savetxt("mle_gpfit_SDSS_new.txt", arr)
    plt.errorbar(ph1, mag, magerr, fmt=".k", capsize=2, label='data')

    plt.plot(t, mu, 'k:', label='best fit')
    plt.fill_between(t, mu - std, mu + std, alpha=0.3)
    plt.legend(loc=0)

    #plt.ylim(plt.ylim()[::-1])
    plt.xlabel("MJD")
    plt.ylabel("flux")
    plt.savefig("mle_SDSS_gpfit_george.pdf")
    plt.show()
Example No. 12
        def LCfit_withGP_xfilter(self, filt, minMJD=None, maxMJD=None):
            if not hasattr(self, "phot"):
                self.load()

            def ll(p):
                gp.set_parameter_vector(p)
                return -gp.log_likelihood(flux_norm,
                                          quiet=False)  # OFEK: changed from deprecated -gp.lnlikelihood(flux_norm, quiet=False)

            # gradient of the likelihood for optimisation of the kernel size

            def grad_ll(p):
                gp.set_parameter_vector(p)
                return -gp.grad_log_likelihood(flux_norm,
                                               quiet=False)  # OFEK: changed from deprecated  -gp.grad_lnlikelihood(flux_norm, quiet=False)

            mjd_peak = self.get_mjdpeak()

            mjd_spectra = self.get_spec_mjd()
            if minMJD is None:
                # minMJD= min([mjd_peak-15.,np.min(self.clipped_phot['MJD'])])  #np.min(self.clipped_phot['MJD'])
                minMJD = min([min(mjd_spectra), np.min(self.clipped_phot['MJD'])])  # np.min(self.clipped_phot['MJD'])
            if maxMJD is None:
                # maxMJD= min([max(mjd_spectra),np.max(self.phot['MJD'])])
                maxMJD = min([minMJD + 300., np.max(self.phot['MJD'])])

            print('MJD range', maxMJD - minMJD)
            new_mjd = np.arange(minMJD - 1., maxMJD + 1., 0.05)

            LC_filt_extended = self.get_singlefilter(filt, extended_clipped=True)
            mjd = LC_filt_extended['MJD']
            mjdT = np.atleast_2d(mjd).T

            orig_flux = (LC_filt_extended['Flux'])
            orig_flux_err = LC_filt_extended['Flux_err']

            if TRY_LOG:
                flux_gp = np.log(LC_filt_extended['Flux'])
                flux_err_gp = LC_filt_extended['Flux_err'] / LC_filt_extended['Flux']
            else:
                flux_gp = (LC_filt_extended['Flux'])
                flux_err_gp = LC_filt_extended['Flux_err']

            keep = ~np.isnan(flux_gp) * ~np.isnan(flux_err_gp)
            mjdT, flux_gp, flux_err_gp = mjdT[keep], flux_gp[keep], flux_err_gp[
                keep]  # OFEK: kick nans (from t==t_explosion)

            norm = np.max(flux_gp)  # np.median(flux_gp)
            flux_norm = flux_gp / norm
            err_flux_norm = flux_err_gp / norm

            if (self.snname in list(kernel_settings_dict.keys())):
                if filt in list(kernel_settings_dict[self.snname].keys()):
                    set_scale = kernel_settings_dict[self.snname][filt]['scale']
                    set_optimization = kernel_settings_dict[self.snname][filt]['opt']
                    set_mean = kernel_settings_dict[self.snname][filt]['mean']
                else:
                    set_scale, set_optimization, set_mean = set_default_kernel_settings(filt)
            else:
                set_scale, set_optimization, set_mean = set_default_kernel_settings(filt)
            if set_mean:
                set_fit_mean = True
            else:
                set_fit_mean = False

            k = np.var(flux_norm) * Matern32Kernel(set_scale)
            gp = george.GP(k, mean=set_mean, fit_mean=set_fit_mean)  # ,
            # white_noise=10**-5, fit_white_noise=True)

            gp.compute(mjdT, err_flux_norm)

            if set_optimization:
                p0 = gp.get_parameter_vector()
                results = opt.minimize(ll, p0, jac=grad_ll)
            print('results ', filt, np.exp(gp.get_parameter_vector()))
            mu_gp, cov = gp.predict(flux_norm, new_mjd)
            std_gp = np.sqrt(np.diag(cov))

            if TRY_LOG:
                mu = np.exp(mu_gp * norm)
                std = np.abs(mu * std_gp * norm)
            else:
                mu = mu_gp * norm
                std = std_gp * norm
            # if filt != 'Bessell_V':

            mu_mjdspec, cov_mjdspec = gp.predict(flux_norm, mjd_spectra)
            std_mjdspec = np.sqrt(np.diag(cov_mjdspec))
            mu_mjdspec = (mu_mjdspec * norm)
            std_mjdspec = (std_mjdspec * norm)

            sudo_pts = LC_filt_extended['FilterSet'] == 'SUDO_PTS'
            self.fitted_phot[filt] = {'clipped_extended_data': [mjd, orig_flux, orig_flux_err, sudo_pts],
                                      'fit_highcadence': [new_mjd, mu, std],
                                      'fit_mjdspec': [mjd_spectra, mu_mjdspec, std_mjdspec]}
            return None
Example No. 13
gp = george.GP(kernel)

# Here we draw a plot to show what kind of functions we get when the Gaussian
# process has no knowledge of the objective values of our function. We draw
# 10 samples below:
fig, ax = plt.subplots()
for pred in gp.sample(t, 10):
  ax.plot(t, pred)
ax.set_ylim(-9, 7)
ax.set_xlabel("x")
ax.set_ylabel("y")
plt.show()

# If we choose a different kernel, the functions we sample will end up looking
# different.
matern_kernel = Matern32Kernel(1.0)
matern_gp = george.GP(matern_kernel)
fig, ax = plt.subplots()
for pred in matern_gp.sample(t, 10):
  ax.plot(t, pred)
ax.set_ylim(-9, 7)
ax.set_xlabel("x")
ax.set_ylabel("y")
plt.show()

#
# Adding Knowledge
#
# Generate some fake, noisy data. In the real world, these are the objective
# values from the function we are trying to model.
x = 10 * np.sort(np.random.rand(10))
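
# Hedged sketch (not part of the original snippet): one plausible way the
# "adding knowledge" step could continue, conditioning a GP on the noisy data
# and predicting on a dense grid. The kernel choice and grid below are assumptions.
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))

gp = george.GP(Matern32Kernel(1.0))
gp.compute(x, yerr)                 # factorise the covariance including the noise
t_pred = np.linspace(0, 10, 500)    # dense prediction grid (assumed)
mu, cov = gp.predict(y, t_pred)     # posterior mean and covariance given (x, y)
std = np.sqrt(np.diag(cov))

fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr, fmt=".k", label="noisy data")
ax.plot(t_pred, mu, label="GP mean")
ax.fill_between(t_pred, mu - std, mu + std, alpha=0.3)
ax.legend()
plt.show()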
Example No. 14
d = os.path.dirname
sys.path.insert(0, d(d(os.path.abspath(__file__))))
import george
from george.kernels import ExpSquaredKernel, Matern32Kernel, CosineKernel

np.random.seed(12345)

experiments = [
    ("exponential squared", [
        ("-k", "$l=0.5$", ExpSquaredKernel(0.5)),
        ("--k", "$l=1$", ExpSquaredKernel(1.0)),
        (":k", "$l=2$", ExpSquaredKernel(2.0)),
    ]),
    ("quasi-periodic", [
        ("-k", "$l=2,\,P=3$", Matern32Kernel(2.0) * CosineKernel(3.0)),
        ("--k", "$l=3,\,P=3$", Matern32Kernel(3.0) * CosineKernel(3.0)),
        (":k", "$l=3,\,P=1$", Matern32Kernel(3.0) * CosineKernel(1.0)),
    ])
]

t = np.linspace(0, 10, 500)
h, w = len(experiments) * 4, 6
fig, axes = pl.subplots(len(experiments), 1, figsize=(w, h), sharex=True)
fig.subplots_adjust(left=0.1,
                    bottom=0.1,
                    right=0.96,
                    top=0.98,
                    wspace=0.0,
                    hspace=0.05)
for ax, (name, runs) in zip(axes, experiments):
Example No. 15
def plot_gp_all_seasons(data):
    """
    Plot a horizontal multi-panel light curve plot for DES objects.

    Parameters
    ----------
    data : `pandas.DataFrame`
        DataFrame object containing a full DES light curve. This should be
        obtained through [NAME OF LOCAL QUERY FUNC] or [NAME OF EASYACCESS
        WRAPPER]

    Returns
    -------
    fig : `plt.figure`
        PyPlot canvas object. If one was originally provided as a parameter,
        the original object is returned; otherwise a new object is created.
    """
    fig, ax = plt.subplots(1, 4, figsize=(20, 5), sharey=True)

    gdf = data.groupby(['season', 'band'])
    for group, obs in gdf:
        i = int(group[0]) - 1

        if i == 3:
            label = group[1]

        else:
            label = None

        ax[i].errorbar(obs['mjd'],
                       obs['flux'],
                       yerr=obs['fluxerr'],
                       fmt='o',
                       c=band_colour(group[1]),
                       label=label)

        flux_norm = obs['flux'].mean()
        obs['flux'] /= flux_norm
        obs['fluxerr'] /= flux_norm

        gp = george.GP(Matern32Kernel(np.exp(10)))
        gp.compute(obs['mjd'], obs['fluxerr'])
        p0 = gp.kernel.vector
        opt.minimize(ll, p0, jac=grad_ll, args=(gp, obs))

        t = np.linspace(obs['mjd'].min(), obs['mjd'].max(), 500)
        mu, cov = gp.predict(obs['flux'], t)
        std = np.sqrt(np.diag(cov))

        mu *= flux_norm
        std *= flux_norm

        ax[i].fill_between(t,
                           mu + std,
                           mu - std,
                           color=band_colour(group[1]),
                           alpha=0.4)
        ax[i].plot(t, mu, color=band_colour(group[1]))

    fig.tight_layout()
    plt.legend(fontsize=16, loc='best')

    return ax
Example No. 16
# kernels = [1.0 * Matern(length_scale=length_scaleParameter, length_scale_bounds=(length_scaleBoundMin, length_scaleBoundMax),
# nu=1.5)]

# from george import kernels

# k1 = 66.0**2 * kernels.ExpSquaredKernel(67.0**2)
# k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(2.0 / 1.3**2, 1.0)
# k3 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)
# k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2) + kernels.WhiteKernel(0.19)
# kernel = k1 + k2 + k3 + k4
# kernel = k1

from george.kernels import Matern32Kernel  #, ConstantKernel, WhiteKernel

# kernel = ConstantKernel(0.5, ndim=5) * Matern32Kernel(0.5, ndim=5) + WhiteKernel(0.1, ndim=5)
kernel = Matern32Kernel(0.5, ndim=4)

# hmf = np.loadtxt('Data/HMF_5Para.txt')

# ----------------------------- i/o ------------------------------------------

###################### PARAMETERS ##############################

original_dim = params.original_dim  # 2549
#intermediate_dim3 = params.intermediate_dim3 # 1600
intermediate_dim2 = params.intermediate_dim2  # 1024
intermediate_dim1 = params.intermediate_dim1  # 512
intermediate_dim0 = params.intermediate_dim0  # 256
intermediate_dim = params.intermediate_dim  # 256
latent_dim = params.latent_dim  # 10
Example No. 17
    if a > (x[mp] + 29.97) and a < (x[mp] + 30.03):
        print('Phase, Mag, Mag-err:', a - x[mp], b, c)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE

############# george part #########
#print flux, dyf, ph400
Yg = np.array(flux)
Y_err = np.array(dyfg)
Xg = np.array(ph400)

norm = Yg.max()
Yg /= norm
Y_err /= norm

gp = george.GP(Matern32Kernel(500))  # + WhiteKernel(0.001))
gp.compute(Xg, Y_err)
p0 = gp.get_parameter_vector()


def ll(p):
    gp.set_parameter_vector(p)
    return -gp.lnlikelihood(Yg, quiet=True)


def grad_ll(p):
    gp.set_parameter_vector(p)
    return -gp.grad_lnlikelihood(Yg, quiet=True)


results = opt.minimize(ll, p0, jac=grad_ll)