Example #1
    def wf(self, parameter_s='', local_ns=None):
        """ Calculate the spectral window function of the observations. 
        Type 'wf -h' for more help. """

        args = parse_arg_string('wf', parameter_s)
        if args == 1: return
        print args  # echo the parsed options (debug)
        
        # use default system or user defined
        try:
            if 'default' in local_ns and not args['-n']:
                system = local_ns['default']
            else:
                system_name = args['-n']
                system = local_ns[system_name]
        except KeyError:
            from shell_colors import red
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                                   'name with the -n option'
            clogger.fatal(msg)
            return
        
        try: 
            system.per
        except AttributeError:
            from shell_colors import green
            clogger.debug('Calculating periodogram to get frequencies')
            stdout_write('Calculating periodogram to get frequencies...')
            system.per = periodograms.gls(system, hifac=5)
            print green(' done')
        
        try:
            system.wf._plot()
        except AttributeError:
            # no spectral window computed yet; create it at the periodogram frequencies
            system.wf = periodograms.SpectralWindow(system.per.freq, system.time)
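The spectral window itself is a standard quantity, W(nu) = (1/N) |sum_j exp(-2*pi*i*nu*t_j)|, evaluated at the periodogram frequencies. As a point of reference, a minimal standalone sketch of that calculation, assuming only NumPy (the function name and arguments are illustrative, not part of the package's `periodograms` API):

import numpy as np

def spectral_window(freq, time):
    """Return |W(nu)| = |(1/N) sum_j exp(-2*pi*i*nu*t_j)| at each frequency."""
    freq = np.atleast_1d(np.asarray(freq, dtype=float))
    time = np.asarray(time, dtype=float)
    # (n_freq, n_time) matrix of complex phase factors
    phases = np.exp(-2j * np.pi * freq[:, None] * time[None, :])
    return np.abs(phases.sum(axis=1)) / time.size

For evenly sampled data this collapses to a single peak at zero frequency; gaps and uneven sampling in `system.time` produce the side lobes that the `wf` magic is meant to display.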
Example #2
def do_it(system, training_variable, ncpu=1):
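    """Train a quasi-periodic GP on one of `system`'s activity indicators
    (given by `training_variable`), plot the chains, the fit and its residuals,
    and return the best hyperparameters and the std of the MCMC chains."""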

    t = system.time

    # find the quantity on which to train the GP
    i = system.extras._fields.index(training_variable) # index corresponding to the quantity
    y = system.extras[i]

    # estimate the uncertainties on the training quantity
    if training_variable == 'rhk':
        training_variable_error = 'sig_rhk'
        i = system.extras._fields.index(training_variable_error)  # index corresponding to the uncertainties
        yerr = system.extras[i]
    elif training_variable == 'fwhm':
        # scale the RV uncertainties to approximate FWHM uncertainties
        f = 2.35e-3 if system.units == 'm/s' else 2.35
        yerr = f * system.error
    elif training_variable == 'bis_span':
        # scale the RV uncertainties to approximate BIS span uncertainties
        yerr = 2.e-3 * system.error

    
    # subtract mean
    y = y - np.mean(y)
    data = (t, y, yerr)

    # quasi-periodic kernel with an additional jitter term
    model = GPfuncs['QuasiPeriodicJitter']

    # initial guess for the hyperparameters, then MCMC fit
    initial = np.array([0.01, 1e-5, 5000, 1, 23])
    sampler, best_p, logl = fit_gp(model, initial, data, ncpu)
    samples = sampler.flatchain
    std = samples.std(axis=0)

    msg = yellow('    :: ') + 'Best GP hyperparameters: ' + initial.size*' %f ' % tuple(best_p)
    clogger.info(msg)
    msg = yellow('    :: ') + 'std of the chains:       ' + initial.size*' %f ' % tuple(std)
    clogger.info(msg)

    # plot the MCMC chains for each hyperparameter, plus the log-likelihood
    plt.figure()
    npar = samples.shape[1]
    for i in range(npar + 1):
        plt.subplot(npar + 1, 1, i + 1)
        if i == npar:
            plt.plot(logl)
        else:
            plt.plot(samples[:, i])
    plt.show()

    # positions where the GP prediction will be computed (fine grid plus the observed times)
    x = np.linspace(min(t), max(t), 5000)
    x = np.hstack((x, t))
    x.sort()

    # Optionally, plot a few posterior samples of the GP conditioned on the data:
    # for s in samples[np.random.randint(len(samples), size=4)]:
    #     # set up the GP for this sample
    #     z1, z2, z3, z4 = s
    #     kernel = z1**2 * kernels.ExpSquaredKernel(z2**2) * kernels.ExpSine2Kernel(2./z4**2, z3)
    #     gp = george.GP(kernel)
    #     gp.compute(t, yerr)
    #
    #     # compute the prediction conditioned on the observations and plot it
    #     m = gp.sample_conditional(y, x)
    #     plt.plot(x, m, color="#4682b4", alpha=0.3)

    # plot the maximum-likelihood solution
    best_p[1] = 0.  # zero the jitter hyperparameter before predicting
    kernel = model[0](*best_p)
    gp = george.GP(kernel, solver=george.HODLRSolver)
    gp.compute(t, yerr)
    print gp.lnlikelihood(y)
    # compute the prediction conditioned on the observations,
    # both on the fine grid (x) and at the observed times (t)
    m, cov = gp.predict(y, x)
    m1, cov = gp.predict(y, t)
    plt.figure()
    plt.subplot(211)
    # plot the GP prediction on top of the data
    plt.plot(x, m, color='r', alpha=0.8)
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
    plt.ylabel(training_variable)
    # plot the residuals after subtracting the prediction at the observed times
    plt.subplot(212)
    plt.errorbar(t, y - m1, yerr=yerr, fmt=".k", capsize=0)

    plt.xlabel('Time [days]')

    # compare periodograms of the data and of the GP-subtracted residuals
    plt.figure()
    ax = plt.subplot(211)
    ts = BasicTimeSeries()
    ts.time = t
    ts.vrad = y
    ts.error = yerr
    per = gls(ts)
    per._plot(axes=ax, newFig=False)
    ax = plt.subplot(212)
    ts.vrad = y-m1
    per = gls(ts)
    per._plot(axes=ax, newFig=False)

    plt.show()

    # ask whether to continue or abort
    enter = raw_input('Press Enter to continue or "n" to abort: ')
    if enter == 'n':
        sys.exit(0)

    return best_p, std
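Stripped of the package-specific plumbing (`GPfuncs`, `fit_gp`, `BasicTimeSeries`), the core of the example above is a quasi-periodic GP regression. Below is a minimal sketch of that step with george 0.2.x (the API the snippet itself uses); the synthetic data and all names are illustrative only:

import numpy as np
import george
from george import kernels

# synthetic, noisy quasi-periodic "activity" signal with a 23-day period
np.random.seed(42)
t = np.sort(np.random.uniform(0., 200., 80))
yerr = 0.05 * np.ones_like(t)
y = np.sin(2 * np.pi * t / 23.) + yerr * np.random.randn(t.size)

# amplitude^2 * squared-exponential (evolution timescale) * periodic term
amp, timescale, period, gamma = 1.0, 50., 23., 1.0
kernel = amp**2 * kernels.ExpSquaredKernel(timescale**2) \
               * kernels.ExpSine2Kernel(2. / gamma**2, period)

gp = george.GP(kernel)
gp.compute(t, yerr)          # factorize the covariance matrix
print gp.lnlikelihood(y)     # log-likelihood of the data under this kernel

# predictive mean (and covariance) on a fine grid, conditioned on the data
x = np.linspace(t.min(), t.max(), 500)
mu, cov = gp.predict(y, x)

In the example above, `fit_gp` additionally samples these hyperparameters with MCMC (via emcee, judging by `sampler.flatchain`), and the second periodogram checks how much quasi-periodic power remains in the residuals `y - m1`.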