Example #1
    def existingperiods(self):
        if not self.timereset:
            self.reset_time()

        time_seconds = self.df['t_mean'] * 60
        relativetup = WDutils.relativescales(self.df)
        flux = relativetup.flux
        ls = LombScargle(time_seconds, flux)
        freq, amp = ls.autopower(nyquist_factor=1)

        detrad = self.df['detrad']
        ls_detrad = LombScargle(time_seconds, detrad)
        freq_detrad, amp_detrad = ls_detrad.autopower(nyquist_factor=1)

        pgram_tup = WDranker_2.find_cPGRAM(ls,
                                           amp_detrad,
                                           exposure=self.exposure)
        strongest_period_tup = pgram_tup.strongest_period_tup
        if strongest_period_tup[0] != -1:
            self.period = strongest_period_tup[0]
        else:
            self.period = float('NaN')
        c_periodogram = pgram_tup.c
        return c_periodogram > 0
Example #2
    def assessrecovery(self):
        exists = self.FUVexists()

        # Exposure metric already computed in init (self.c_exposure)

        # Periodogram Metric
        time_seconds = self.df['t_mean'] * 60  # (unused: the LombScargle calls below use t_mean directly)
        #ls = LombScargle(time_seconds, self.flux_injected)
        ls = LombScargle(self.df['t_mean'], self.flux_injected)
        freq, amp = ls.autopower(nyquist_factor=1)

        detrad = self.df['detrad']
        #ls_detrad = LombScargle(time_seconds, detrad)
        ls_detrad = LombScargle(self.df['t_mean'], detrad)
        freq_detrad, amp_detrad = ls_detrad.autopower(nyquist_factor=1)
        pgram_tup = WDranker_2.find_cPGRAM(ls,
                                           amp_detrad,
                                           exposure=self.exposure)
        # Return 0/1 result of recovery
        c_periodogram = pgram_tup.c
        ditherperiod_exists = pgram_tup.ditherperiod_exists

        # Welch Stetson Metric
        if exists:
            c_ws = WDranker_2.find_cWS(self.t_mean, self.t_mean_fuv,
                                       self.flux_injected,
                                       self.flux_injected_fuv, self.flux_err,
                                       self.flux_err_fuv, ditherperiod_exists,
                                       self.FUVexists())
        else:
            c_ws = WDranker_2.find_cWS(self.t_mean, None, self.flux_injected,
                                       None, self.flux_err, None,
                                       ditherperiod_exists, self.FUVexists())

        # RMS Metric --- have to 'unscale' the magnitudes
        converted_flux = [f * self.original_median for f in self.flux_injected]
        injectedmags = [WDutils.flux_to_mag('NUV', f) for f in converted_flux]
        sigma_mag = median_absolute_deviation(injectedmags)
        c_magfit = WDranker_2.find_cRMS(self.mag, sigma_mag, 'NUV')

        # Weights:
        w_pgram = 1
        w_expt = .2
        w_WS = .3
        w_magfit = .25

        C = ((w_pgram * c_periodogram) + (w_expt * self.c_exposure) +
             (w_magfit * c_magfit) + (w_WS * c_ws))

        if C > self.cutoff:
            return 1
        else:
            return 0
Example #3
def periodcheck(thistime, thisflux, mflags):
    #dates,flux,flux_pcor,flux_ptcor,mflags = readpsfk2cor(k2name)
    ig = (mflags == 0)
    #
    # k2sc documentation:
    # https://github.com/OxES/k2sc/blob/master/relase_readme.txt
    # indicates that mflags ==0 would be good data.
    #
    # This section, not used, shows how to do sigma clipping. Unnecessary since mflags already
    # applies a ~4-5 sigma clip.
    #sigma clipping stuff ; see http://docs.astropy.org/en/stable/stats/robust.html#sigma-clipping
    #from astropy.stats import sigma_clip
    #filtered_data = sigma_clip(flux_ptcor, sigma=3, iters=10)
    # that would be a mask
    #
    # DISCOVERY: WILL CRASH IF ALL DATA FLAGGED AS BAD!!!
    # SOLUTION: DON'T GIVE IT THOSE FILES!
    #
    # Periodogram stuff
    # search good data only in period range 1 hour to 10 days
    ls = LombScargle(thistime[ig], thisflux[ig])
    frequency, power = ls.autopower(maximum_frequency=24.0,
                                    minimum_frequency=0.1)
    #
    best_frequency = frequency[np.argmax(power)]
    best_fap = ls.false_alarm_probability(power.max())
    #
    # Calculate the model if desired
    #y_fit = ls.model(dates, best_frequency)
    #plt.plot(t_fit,y_fit,'k-')
    #
    return frequency, power, best_frequency, best_fap
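
# A minimal usage sketch (editor's addition, not from the original source):
# synthetic flux with a 3.2-day signal plus k2sc-style flags, where mflags == 0
# marks good cadences. Assumes numpy as np and astropy's LombScargle are
# imported in this module, as periodcheck requires.
rng = np.random.default_rng(42)
t_demo = np.sort(rng.uniform(0.0, 80.0, 2000))               # days
f_demo = 1.0 + 0.01 * np.sin(2 * np.pi * t_demo / 3.2)       # relative flux
mflags_demo = np.zeros(len(t_demo), dtype=int)
mflags_demo[rng.choice(len(t_demo), 50, replace=False)] = 1  # a few bad points
freq, power, best_freq, fap = periodcheck(t_demo, f_demo, mflags_demo)
print(f"best period: {1 / best_freq:.2f} d (FAP {fap:.2g})")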
Example #4
def main():
    df = pd.read_fwf(data_url,
                     colspecs=((0, 6), (7, 27)),
                     header=1,
                     names=('orbnum', 'utc'),
                     parse_dates=[1],
                     date_parser=_dp)

    sec_of_day = 86400.0
    df['period'] = df.utc.diff(periods=1).dt.total_seconds()/sec_of_day  # days

    ax = plt.axes()
    plt.plot(df.utc, df.period, marker='+')
    plt.xlabel('date/time')
    plt.ylabel('period (days)')
    plt.ylim(10.0, 14.0)

    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=4))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))

    plt.show()


    date0 = pd.Timestamp('2016-04-17')
    df_m = df[ df.utc > date0 ].copy()
    df_m['delta'] = df_m['utc'] - date0
    df_m['et'] = df_m['delta'].dt.total_seconds()/sec_of_day


    from astropy.stats import LombScargle
    ls = LombScargle(df_m.et, df_m.period, fit_mean=True)
    freq, power = ls.autopower()
    fmax = freq[np.argmax(power)]
    print('period(Lomb-Scargle): ', 1/fmax)
    print('period(rev/2):        ', 224.701/2.0)
Example #5
def lomb_scargle_estimator(x, y, yerr=None,
                           min_period=None, max_period=None,
                           filter_period=None,
                           max_peaks=2,
                           **kwargs):
    """Estimate period of a time series using the periodogram

    Args:
        x (ndarray[N]): The times of the observations
        y (ndarray[N]): The observations at times ``x``
        yerr (Optional[ndarray[N]]): The uncertainties on ``y``
        min_period (Optional[float]): The minimum period to consider
        max_period (Optional[float]): The maximum period to consider
        filter_period (Optional[float]): If given, use a high-pass filter to
            down-weight periods longer than this
        max_peaks (Optional[int]): The maximum number of peaks to return
            (default: 2)

    Returns:
        A dictionary with the computed ``periodogram`` and the parameters for
        up to ``max_peaks`` peaks in the periodogram.

    """
    if min_period is not None:
        kwargs["maximum_frequency"] = 1.0 / min_period
    if max_period is not None:
        kwargs["minimum_frequency"] = 1.0 / max_period

    # Estimate the power spectrum
    model = LombScargle(x, y, yerr)
    freq, power = model.autopower(method="fast", normalization="psd", **kwargs)
    power /= len(x)
    power_est = np.array(power)

    # Filter long periods
    if filter_period is not None:
        freq0 = 1.0 / filter_period
        filt = 1.0 / np.sqrt(1 + (freq0 / freq) ** (2*3))
        power *= filt

    # Find and fit peaks
    peak_inds = (power[1:-1] > power[:-2]) & (power[1:-1] > power[2:])
    peak_inds = np.arange(1, len(power)-1)[peak_inds]
    peak_inds = peak_inds[np.argsort(power[peak_inds])][::-1]
    peaks = []
    for i in peak_inds[:max_peaks]:
        A = np.vander(freq[i-1:i+2], 3)
        w = np.linalg.solve(A, np.log(power[i-1:i+2]))
        sigma2 = -0.5 / w[0]
        freq0 = w[1] * sigma2
        peaks.append(dict(
            log_power=w[2] + 0.5*freq0**2 / sigma2,
            period=1.0 / freq0,
            period_uncert=np.sqrt(sigma2 / freq0**4),
        ))

    return dict(
        periodogram=(freq, power_est),
        peaks=peaks,
    )
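
# A hedged usage sketch (editor's addition): recover a 7.5-day period from
# noisy synthetic data. Assumes numpy as np and astropy's LombScargle are in
# scope, as the function above requires.
rng = np.random.default_rng(0)
x_demo = np.sort(rng.uniform(0, 100, 500))
y_demo = np.sin(2 * np.pi * x_demo / 7.5) + 0.1 * rng.standard_normal(500)
results = lomb_scargle_estimator(x_demo, y_demo, min_period=1.0, max_period=50.0)
for peak in results["peaks"]:
    print(f"period ~ {peak['period']:.2f} +/- {peak['period_uncert']:.2f}")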
Example #6
def lomb_scargle(t,
                 y,
                 dy=None,
                 minfreq=1. / 365,
                 maxfreq=1 / 2,
                 npeaks=0,
                 peaktol=0.05):
    # periodogram
    if isinstance(dy, np.ndarray):
        ls = LombScargle(t, y, dy)
    else:
        ls = LombScargle(t, y)

    frequency, power = ls.autopower(minimum_frequency=minfreq,
                                    maximum_frequency=maxfreq,
                                    samples_per_peak=10)
    probabilities = [0.1, 0.05, 0.01]
    try:
        pp = ls.false_alarm_level(probabilities)
    except Exception:
        pp = [-1, -1, -1]
    # power probabilities: the peak heights the periodogram must reach to
    # attain 10%, 5%, and 1% false alarm probability, respectively
    if npeaks > 0:

        # find peaks in periodogram
        peaks, amps = find_peaks(power, height=peaktol)
        Nterms = npeaks  #min(npeaks,len(peaks))
        fdata = np.zeros((Nterms, 3))  # frequency, amplitude, shift

        # fit amplitudes to each peak frequency
        if Nterms > 0 and npeaks > 0:

            # sort high to low
            peaks = peaks[np.argsort(amps['peak_heights'])[::-1]]

            # estimate priors
            for i in range(min(npeaks, len(peaks))):
                fdata[i, 0] = frequency[int(peaks[i])]
                fdata[i, 1] = np.sort(amps['peak_heights'])[::-1][i] * maxavg(
                    y)  #amplitude estimate
                fdata[i, 2] = 0  # phase shift estimate

            # ignore fitting frequencies
            priors = fdata.flatten()
            bounds = np.array([[0, 1], [0, max(y) * 1.5], [0, 2 * np.pi]] *
                              Nterms).T

            def fit_wave(pars):
                wave = make_sin(t, pars)
                return (y - wave) / y

            res = least_squares(fit_wave, x0=priors, bounds=bounds)

            fdata = res.x.reshape(Nterms, -1)

        return frequency, power, fdata
    else:
        return frequency, power
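
# A usage sketch (editor's addition) for the default npeaks=0 path, which avoids
# the external make_sin/maxavg helpers needed by the peak-fitting branch.
# Assumes numpy as np and astropy's LombScargle are imported.
t_demo = np.sort(np.random.default_rng(5).uniform(0, 3 * 365, 500))  # days
y_demo = 10 + 0.5 * np.sin(2 * np.pi * t_demo / 30.0)                # 30-day signal
frequency, power = lomb_scargle(t_demo, y_demo)
print(1.0 / frequency[np.argmax(power)])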
Example #7
def visibility_MC(P, RV, t):
    LS = LombScargle(t.value, RV.value)
    frequency, power = LS.autopower()
    false_alarm = LS.false_alarm_level(0.01, method='bootstrap')
    prob = LS.power(1. / P.value)  # power at the known period (unused below)
    return power.max() > false_alarm
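
# A sketch (editor's addition) of calling visibility_MC; the .value accesses
# imply P, RV, and t are astropy Quantity objects. The bootstrap false-alarm
# level is slow, so the series is kept short.
import numpy as np
import astropy.units as u

rng = np.random.default_rng(1)
t_demo = np.sort(rng.uniform(0, 200, 80)) * u.day
P_demo = 12.3 * u.day
RV_demo = (5.0 * np.sin(2 * np.pi * t_demo.value / P_demo.value)
           + rng.standard_normal(80)) * u.m / u.s
print(visibility_MC(P_demo, RV_demo, t_demo))  # True if the peak clears the 1% FAP level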
Example #8
def visibility(Planet, Observations):
    LS = LombScargle(Observations.t.value, Observations.RV.value)
    frequency, power = LS.autopower()
    false_alarm = LS.false_alarm_level(0.01, method='bootstrap')
    prob = LS.power(1. / Planet.P.value)  # power at the known period (unused below)
    return power.max() > false_alarm
Example #9
def getLombScarglePeriodogram(peaks,
                              tachogram,
                              minFreq=0,
                              maxFreq=.4,
                              norm='standard'):
    ls = LombScargle(peaks, tachogram)
    freq, power = ls.autopower(minimum_frequency=minFreq,
                               maximum_frequency=maxFreq,
                               normalization=norm)
    print(sumWithNan(power))
    return (freq, power)
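
# sumWithNan is not shown in the snippet; below is a NaN-tolerant stand-in plus
# a hypothetical heart-rate-variability style call (editor's addition): beat
# times in seconds and a tachogram of RR intervals.
import numpy as np

def sumWithNan(x):
    # Presumed behavior: a sum that ignores NaNs in the power array
    return np.nansum(x)

rr_demo = 0.8 + 0.05 * np.sin(2 * np.pi * 0.1 * np.arange(300))  # RR intervals (s)
beat_times = np.cumsum(rr_demo)                                  # beat times (s)
freq, power = getLombScarglePeriodogram(beat_times, rr_demo)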
Example #10
def get_pgram(time, flux, min_p=1./24., max_p=20.):
    finite = np.isfinite(flux)
    ls = LombScargle(time[finite], flux[finite], normalization='psd')

    frequency, power = ls.autopower(minimum_frequency=1./max_p,
                                    maximum_frequency=1./min_p,
                                    samples_per_peak=5)

    # Normalize according to Parseval's theorem - same form used by
    # Oliver Hall in lightkurve
    norm = np.nanstd(flux * 1e6)**2 / np.sum(power)
    # Frequency bin width in muHz (1 cycle/day = 11.574 muHz), so power
    # comes out in ppm^2 / muHz
    fs = np.mean(np.diff(frequency * 11.57))

    power *= norm / fs

    spower = gaussfilt(power, 15)

    return frequency, power, spower
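
# gaussfilt is not defined in this snippet; a plausible stand-in (editor's
# assumption) is a 1-D Gaussian smoother whose width is given in frequency bins.
from scipy.ndimage import gaussian_filter1d

def gaussfilt(y, sigma):
    # Smooth the power spectrum with a Gaussian kernel of width sigma bins
    return gaussian_filter1d(y, sigma)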
Example #11
def hybrid_periodogram(times, magnitudes, errors, give_ls=False):
    """
    computes the power or periodicity (a hybrid method which uses both
    the Lomb-Scargle method and the Lafler-Kinman method) of a set of
    magnitude data (in one filter) over a series of periods.

    Parameters
    ----------
    times : numpy array or list
        the list of times for the data

    magnitudes : numpy array or list
        the list of magnitudes for the data

    errors : numpy array or list
        the list of errors for the data


    Returns
    -------
    period : list
        a list of periods for which periodicity was computed

    menorah : list
        a list of periodicities based on both the periodicities returned
        by the Lomb-Scargle and Lafler-Kinman methods at each period.
        Called menorah because the symbol used to represent this value is an upper-case
        psi, which I believe looks like a menorah with most of the candle
        holders broken off.

    """
    # Lomb-Scargle
    LS = LombScargle(times, magnitudes)
    LS_frequency, LS_power = LS.autopower()
    # Converts frequency to period
    period = 1 / LS_frequency
    # Lafler-Kinman
    theta_list = []
    # for each period that was used in LombScargle, this computes Theta
    for p in period:
        theta_list.append(Theta(p, times, magnitudes, errors))
    theta_array = np.array(theta_list)
    # Finding Menorah (otherwise known as upper-case psi)
    menorah = (2 * LS_power) / theta_array
    if give_ls:
        return period, menorah, LS
    else:
        return period, menorah
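
# Theta is not defined in this snippet; a minimal sketch (editor's addition) of
# the Lafler-Kinman statistic it presumably implements. The errors argument is
# accepted for signature compatibility but unused here; the original may use it
# to weight the successive differences.
import numpy as np

def Theta(period, times, magnitudes, errors=None):
    """Lafler-Kinman statistic: small when the phase-folded curve is smooth."""
    phase = (np.asarray(times) % period) / period
    order = np.argsort(phase)
    m = np.asarray(magnitudes)[order]
    numerator = np.sum((np.roll(m, -1) - m) ** 2)  # phase-ordered successive diffs
    denominator = np.sum((m - m.mean()) ** 2)
    return numerator / denominator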
Example #12
def ls(t, c, lowf=.01, highf=1, spp=10, ylim=(0, .5), xlow=0):
    '''
    Plots Lomb-Scargle Periodogram and return top 10
    highest-powered periods
    
    Inputs:
    -------
    t: array of observation times
    c: array of counts at those times (Poisson errors dy = sqrt(c) are assumed)
    lowf: minimum frequency to search
    highf: maximum frequency to search
    spp: samples per peak
    ylim: tuple of limits of plot
    xlow: int or float of lower end of x limit of plot
    
    Outputs:
    --------
    first 10 values of DataFrame containing frequencies, periods,
    and power in descending order by power
    '''
    fig = plt.figure(figsize=(10, 3))
    gs = plt.GridSpec(2, 2)

    dy = np.sqrt(c)

    ax = fig.add_subplot(gs[:, 0])
    ax.errorbar(t, c, dy, fmt='ok', ecolor='gray', markersize=3, capsize=0)
    ax.set(xlabel='time', ylabel='signal', title='Data and Model')

    ls = LombScargle(t, c)
    freq, power = ls.autopower(normalization='standard',
                               minimum_frequency=lowf,
                               maximum_frequency=highf,
                               samples_per_peak=spp)

    plt.plot(1. / freq, power, color='rebeccapurple')
    plt.xlabel('Period (s)')
    plt.ylabel('Power')
    plt.xlim(xlow, 1 / lowf)
    plt.ylim(ylim)

    best_freq = freq[np.argmax(power)]
    print(1. / best_freq)
    frame = pd.DataFrame(columns=['f', 'pow', 'pd'])
    frame['f'] = freq
    frame['pow'] = power
    frame['pd'] = 1. / freq
    return (frame.sort_values(by='pow', ascending=False)[:10])
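
# A usage sketch (editor's addition): Poisson counts with a 60-second
# modulation. Assumes numpy as np, pandas as pd, matplotlib.pyplot as plt, and
# astropy's LombScargle are imported, as the function requires.
rng = np.random.default_rng(2)
t_demo = np.sort(rng.uniform(0, 500, 300))                          # seconds
c_demo = rng.poisson(100 + 20 * np.sin(2 * np.pi * t_demo / 60.0))  # counts
top10 = ls(t_demo, c_demo, lowf=1 / 200, highf=0.1)
print(top10)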
Example #13
def periodogram(bjd, flux, flux_err):
    t = (bjd - bjd[0])*24.0
    mean = np.mean(flux)
    flux = flux - mean
    dt = [ t[i+1] - t[i-1] for i in range(1,len(t)-1)]
    fmax = 1.0/np.median(dt)
    fmin = 2.0/(max(t))
    ls = LombScargle(t, flux, flux_err)
    #Oversampling a factor of 10 to achieve frequency resolution
    freq, power = ls.autopower(minimum_frequency=fmin,
                               maximum_frequency=fmax,
                               samples_per_peak=10)
    best_f = freq[np.argmax(power)]
    period = 1.0/best_f #period from the LS periodogram
    fap_p = ls.false_alarm_probability(power.max())

    amp = np.sqrt(power)/mean
    return np.array(freq), np.array(amp), period, fap_p
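
# A usage sketch (editor's addition): a 5-hour sinusoid over 10 days of data.
# Assumes numpy as np and astropy's LombScargle are imported.
rng = np.random.default_rng(3)
bjd_demo = 2458000.0 + np.sort(rng.uniform(0, 10, 400))  # days
flux_demo = 1.0 + 0.02 * np.sin(2 * np.pi * (bjd_demo - bjd_demo[0]) * 24.0 / 5.0)
err_demo = np.full_like(flux_demo, 1e-3)
freq, amp, period, fap = periodogram(bjd_demo, flux_demo, err_demo)
print(f"period ~ {period:.2f} h (FAP {fap:.2g})")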
Example #14
def plot_LombScargle(Planet, Observations):
    LS = LombScargle(Observations.t.value, Observations.RV.value,
                     Observations.errRV.value)
    frequency, power = LS.autopower()
    plt.plot(frequency, power)
    plt.xlabel('frequency')
    plt.ylabel('Lomb-Scargle Power')
    plt.axvline(1. / Planet.P.value, lw=2, color='red', alpha=0.4)
    plt.axhline(LS.false_alarm_level(0.1, method='bootstrap'),
                linestyle=':',
                lw=2,
                color='black',
                alpha=0.4)
    plt.axhline(LS.false_alarm_level(0.01, method='bootstrap'),
                linestyle='-',
                lw=2,
                color='black',
                alpha=0.4)
    plt.ylim([0., 1.])
Example #15
    def plot_lomb_scargle(self):
        from astropy.stats import LombScargle

        ls = LombScargle(self.t, self.mag, self.dmag)
        frequency, power = ls.autopower()
        print('Maximum power: {}, occurred at time period: {}'.format(max(power), 1. / frequency[np.argmax(power)]))

        fig, ax = plt.subplots(1, 2, figsize=(12, 5))
        fig.suptitle('Lomb-Scargle Periodogram for LINEAR object {0}'.format(self.id))
        fig.subplots_adjust(bottom=0.12, left=0.07, right=0.95)

        # plot the raw data
        ax[0].plot(frequency, power)
        ax[0].set(xlabel='Frequency',ylabel='Lomb-Scargle Power')

        # plot the periodogram
        ax[1].plot(1. / frequency, power)
        ax[1].set(xlim=(0,100),xlabel='period (days)',ylabel='Lomb-Scargle Power')
        return fig, ax
Example #16
def bootstrap(time,flux,error,size,min_freq,max_freq):
    """
    Function that performs the bootstraping
    :params: time: time series array (x value)
    :params: flux: time series array (y value)
    :params: error: error on the y
    :params: size: number of bootstrap resamplings
    :params: min_freq: minimum frequency to search
    :params: max_freq: maximum frequency to search

    :out: per_sum: array with the sum of all the Periodograms
    :out: peaks: array with frequency of each peak 
    """


    flux_distributions = np.zeros((len(flux),size))
    for i in range(0,len(flux)):
        flux_distributions[i] = np.random.normal(flux[i],error[i],size)

    peaks = []
    per_sum = 0  
    peaks_len = 0
    for i in range(size):
        ls = LombScargle(time, flux_distributions[:,i], error)
        threshold = ls.false_alarm_level(0.99)  # computed but not used below
        freq, PLS = ls.autopower(minimum_frequency=min_freq,maximum_frequency=max_freq,samples_per_peak=1000)
        if type(per_sum) is int:
            per_sum = PLS
        else: 
            per_sum += PLS

        peaks.append(freq[argrelextrema(PLS,np.greater)])

        if i%(size/100) == 0:
            print("Progress: %i %%"%(i/size*100))

    peaks = np.concatenate(peaks)
    return(per_sum,peaks)
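
# A usage sketch (editor's addition): a 3-day signal bootstrapped 100 times.
# Assumes numpy as np, astropy's LombScargle, and scipy.signal's argrelextrema
# are imported in the host module, as the function requires.
rng = np.random.default_rng(4)
t_demo = np.sort(rng.uniform(0, 50, 200))            # days
y_demo = 1 + 0.1 * np.sin(2 * np.pi * t_demo / 3.0)
e_demo = np.full_like(y_demo, 0.02)
per_sum, peaks = bootstrap(t_demo, y_demo, e_demo, size=100,
                           min_freq=0.05, max_freq=2.0)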
Example #17
def assert_lsperiod_is_approx(time,
                              flux,
                              err,
                              target_period,
                              significant=4,
                              verbose=True):
    """
    Given a light curve, require the Lomb Scargle period to be near a target
    value.

    args:
        time, flux, err: np.ndarrays
        target_period: float
        significant: int, number of significant digits used in the assertion
            statement.
    """
    from astropy.stats import LombScargle

    period_min = target_period / 10
    period_max = target_period * 10

    ls = LombScargle(time, flux, err)
    freq, power = ls.autopower(minimum_frequency=1 / period_max,
                               maximum_frequency=1 / period_min,
                               samples_per_peak=20)

    ls_fap = ls.false_alarm_probability(power.max())

    ls_period = 1 / freq[np.argmax(power)]

    if verbose:
        msg = (
            f'LS Period: got {ls_period:.4f} d, target {target_period:.4f} d')
        print(msg)

    assert_approx_equal(ls_period, target_period, significant=significant)
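
# A usage sketch (editor's addition): a clean 2.5-day sinusoid. significant=3
# is used because the frequency grid (samples_per_peak=20) limits how closely
# the recovered peak can match the target. Assumes numpy as np and
# numpy.testing's assert_approx_equal are imported, as the function requires.
t_demo = np.linspace(0, 30, 3000)                     # days
f_demo = 1 + 0.05 * np.sin(2 * np.pi * t_demo / 2.5)
e_demo = np.full_like(f_demo, 1e-3)
assert_lsperiod_is_approx(t_demo, f_demo, e_demo, target_period=2.5,
                          significant=3)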
Example #18
from astropy.io import fits
from astropy.stats import LombScargle
from astropy.table import Table
from sklearn.utils import resample

radio = Table.read('target.phot.ll.txt', format='ascii')
period_bs = fits.getdata('period_bs_fullsamp.fits')
period_mc = fits.getdata('period_mc_1e5samples.fits')

minfreq = 2.0
maxfreq = 20
samppeak = 100

ls = LombScargle(radio['mjd'], radio['re'])      

frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq, samples_per_peak=samppeak)
best_frequency = frequency[np.argmax(power)]
best_period = (1. / best_frequency)*24.  # hours (frequency is per day for MJD input)
fap = ls.false_alarm_probability(power.max())

fig = plt.figure(figsize=(20, 10))
fig.tight_layout()

grid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.4)

ax1 = fig.add_subplot(grid[ :, 0])
ax1.plot((1. / frequency)*24., power, label='peak period:'+'{:10.3f}'.format(best_period)+' hours', color='orange')
ax1.set_xlabel('period (hr)', fontsize=30)
ax1.set_ylabel('periodogram power', fontsize=30)
ax1.set_xlim((1, 6))
ax1.text(0.5, 0.037, 'A', size=40)
Example #19
def main(csvname,
         makeplot,
         w_pgram=1,
         w_expt=.2,
         w_WS=.30,
         w_magfit=.25,
         comment=False):

    ###Path assertions###
    catalogpath = ("/home/dmrowan/WhiteDwarfs/" +
                   "Catalogs/MainCatalog_reduced_simbad_asassn.csv")
    sigmamag_path_NUV = "Catalog/SigmaMag_NUV.csv"
    sigmamag_path_FUV = "Catalog/SigmaMag_FUV.csv"
    assert (os.path.isfile(csvname))
    assert (os.path.isfile(catalogpath))
    assert (os.path.isfile(sigmamag_path_NUV))
    assert (os.path.isfile(sigmamag_path_FUV))
    assert (os.path.isdir('PDFs'))
    assert (os.path.isdir('Output'))

    #Find source name from csvpath (strip the trailing '-NUV.csv'/'-FUV.csv';
    #the first 'c' encountered is assumed to be the 'c' of '.csv')
    csvpath = csvname
    for i in range(len(csvpath)):
        character = csvpath[i]
        if character == 'c':
            endidx = i - 5
            break
    source = csvpath[0:endidx]

    #Grab the band (also checks we have source csv)
    if csvpath[-7] == 'N':
        band = 'NUV'
        band_other = 'FUV'
    elif csvpath[-7] == 'F':
        band = 'FUV'
        band_other = 'NUV'
    else:
        print("Not source csv, skipping")
        return
    assert (band is not None)

    bandcolors = {'NUV': 'red', 'FUV': 'blue'}
    alldata = pd.read_csv(csvpath)
    ###Alldata table corrections###
    alldata = WDutils.df_reduce(alldata)

    #Fix rows with incorrect t_means by averaging t0 and t1
    alldata = WDutils.tmean_correction(alldata)

    ###Apparent Magnitude###
    m_ab = np.nanmedian(alldata['mag_bgsub'])
    sigma_mag_all = median_absolute_deviation(alldata['mag_bgsub'])
    magdic = {"mag": [m_ab], "sigma": [sigma_mag_all], "weight": [1]}
    #c_magfit_all = find_cRMS(m_ab, sigma_mag_all, band)

    ###See if we have any data in the other band###
    if band == 'NUV':
        csvpath_other = csvpath.replace('NUV', 'FUV')
    else:
        csvpath_other = csvpath.replace('FUV', 'NUV')
    #Look for file in GALEXphot/LCs
    csvpath_other = f"/home/dmrowan/WhiteDwarfs/GALEXphot/LCs/{csvpath_other}"
    if os.path.isfile(csvpath_other):
        other_band_exists = True
        alldata_other = pd.read_csv(csvpath_other)
    else:
        other_band_exists = False

    if other_band_exists:
        #print(f"Generating additional LC data for {band_other} band")
        alldata_other = pd.read_csv(csvpath_other)
        alldata_other = WDutils.df_fullreduce(alldata_other)
        #Fix rows with weird t_mean time
        alldata_other = WDutils.tmean_correction(alldata_other)

        #Make correction for relative scales
        relativetup_other = WDutils.relativescales(alldata_other)
        alldata_tmean_other = relativetup_other.t_mean
        alldata_flux_bgsub_other = relativetup_other.flux
        alldata_flux_bgsub_err_other = relativetup_other.err

    ###Query Catalogs###
    bigcatalog = pd.read_csv(catalogpath)
    bigcatalog_idx = WDutils.catalog_match(source, bigcatalog)
    if len(bigcatalog_idx) == 0:
        print(f"{source} not in catalog")
        with open("../brokensources.txt", 'a') as f:
            f.write(f"source \n")
        return
    else:
        bigcatalog_idx = bigcatalog_idx[0]

    spectype = bigcatalog['spectype'][bigcatalog_idx]
    variability = bigcatalog['variability'][bigcatalog_idx]
    binarity = bigcatalog['binarity'][bigcatalog_idx]
    hasdisk = bigcatalog['hasdisk'][bigcatalog_idx]
    simbad_name = bigcatalog['SimbadName'][bigcatalog_idx]
    simbad_types = bigcatalog['SimbadTypes'][bigcatalog_idx]
    gmag = bigcatalog['gaia_g_mean_mag'][bigcatalog_idx]

    ###Break the alldata table into exposure groups###
    data = WDutils.dfsplit(alldata, 100)
    print(f"Dividing {source} {band} data into {len(data)} exposure groups")

    #Initialize Lists
    df_number = 1
    c_vals = []
    c_ws_vals = []
    c_magfit_vals = []
    c_exp_vals = []
    c_pgram_vals = []
    df_numbers_run = []
    biglc_time = []
    biglc_counts = []
    biglc_err = []
    strongest_periods_list = []
    fap_list = []
    ditherperiod_exists = False

    ###Loop through each exposure group###
    for df in data:
        if len(df['t1']) == 0:
            df_number += 1
            continue
        ###Dataframe corrections###
        #Reset first time in t_mean to be 0
        firsttime_mean = df['t_mean'][df.index[0]]
        df['t_mean'] = df['t_mean'] - firsttime_mean

        #Find exposure, c_exposure
        exposuretup = find_cEXP(df)
        firsttime = exposuretup.firsttime
        lasttime = exposuretup.lasttime
        exposure = exposuretup.exposure
        c_exposure = exposuretup.c_exposure
        c_exp_vals.append(c_exposure)

        #Filter for red and blue points
        coloredtup = WDutils.ColoredPoints(df)
        redpoints = coloredtup.redpoints
        bluepoints = coloredtup.bluepoints
        droppoints = np.unique(np.concatenate([redpoints, bluepoints]))

        #Corrections for relative scales
        relativetup = WDutils.relativescales(df)
        t_mean = relativetup.t_mean
        flux_bgsub = relativetup.flux
        flux_bgsub_err = relativetup.err

        if len(redpoints) != 0:
            t_mean_red = [t_mean[ii] for ii in redpoints]
            flux_bgsub_red = [flux_bgsub[ii] for ii in redpoints]
            flux_bgsub_err_red = [flux_bgsub_err[ii] for ii in redpoints]
        if len(bluepoints) != 0:
            t_mean_blue = [t_mean[ii] for ii in bluepoints]
            flux_bgsub_blue = [flux_bgsub[ii] for ii in bluepoints]
            flux_bgsub_err_blue = [flux_bgsub_err[ii] for ii in bluepoints]

        #Drop red and blue points
        df_reduced = df.drop(index=droppoints)
        df_reduced = df_reduced.reset_index(drop=True)

        if df_reduced.shape[0] < 10:
            df_number += 1
            continue

        #Drop bad first and last points
        df_reduced = WDutils.df_firstlast(df_reduced)

        #Have to do this again to get the reduced indices
        relativetup_reduced = WDutils.relativescales(df_reduced)
        t_mean = relativetup_reduced.t_mean
        flux_bgsub = relativetup_reduced.flux
        flux_bgsub_err = relativetup_reduced.err

        #Match points in other band
        if other_band_exists:
            idx_exposuregroup_other = np.where(
                (alldata_tmean_other > firsttime)
                & (alldata_tmean_other < lasttime))[0]

            t_mean_other = np.array(
                alldata_tmean_other[idx_exposuregroup_other] - firsttime_mean)

            flux_bgsub_other = np.array(
                alldata_flux_bgsub_other[idx_exposuregroup_other])

            flux_bgsub_err_other = np.array(
                alldata_flux_bgsub_err_other[idx_exposuregroup_other])

        ###Periodogram Creation###
        #First do the periodogram of the data
        ls = LombScargle(t_mean, flux_bgsub)
        freq, amp = ls.autopower(nyquist_factor=1)

        #Periodogram for dither information
        detrad = df_reduced['detrad']
        ls_detrad = LombScargle(t_mean, detrad)
        freq_detrad, amp_detrad = ls_detrad.autopower(nyquist_factor=1)

        #Periodogram for expt information
        exptime = df_reduced['exptime']
        ls_expt = LombScargle(t_mean, exptime)
        freq_expt, amp_expt = ls_expt.autopower(nyquist_factor=1)

        #Periodogram metric
        pgram_tup = find_cPGRAM(ls, amp_detrad, exposure)
        c_periodogram = pgram_tup.c
        ditherperiod_exists = pgram_tup.ditherperiod_exists
        strongest_period_tup = pgram_tup.strongest_period_tup
        if strongest_period_tup[0] != -1:
            strongest_periods_list.append(strongest_period_tup)
            fap_list.append(strongest_period_tup[2])

        c_pgram_vals.append(c_periodogram)
        sspeaks = pgram_tup.sspeaks

        #Welch Stetson Variability Metric.
        if other_band_exists:
            c_ws = find_cWS(t_mean, t_mean_other, flux_bgsub, flux_bgsub_other,
                            flux_bgsub_err, flux_bgsub_err_other,
                            ditherperiod_exists, other_band_exists)
        else:
            c_ws = find_cWS(t_mean, None, flux_bgsub, None, flux_bgsub_err,
                            None, ditherperiod_exists, other_band_exists)

        c_ws_vals.append(c_ws)

        #Sigma Mag Metric
        ###Grab magnitude information###
        df_sigma_mag = median_absolute_deviation(df_reduced['mag_bgsub'])
        magdic["mag"].append(m_ab)
        magdic["sigma"].append(df_sigma_mag)
        magdic["weight"].append(.25)
        c_magfit = find_cRMS(m_ab, df_sigma_mag, band)
        c_magfit_vals.append(c_magfit)

        ###Autocorrelation results###
        autocorr_result = selfcorrelation(flux_bgsub)

        #####GENERATE RATING#####
        print(c_periodogram, c_exposure, c_magfit, c_ws)
        C = ((w_pgram * c_periodogram) + (w_expt * c_exposure) +
             (w_magfit * c_magfit) + (w_WS * c_ws))
        print(f"Exposure group {df_number} ranking: {C}")
        c_vals.append(C)

        if makeplot:
            ###Generate plot/subplot information###
            fig = plt.figure(df_number, figsize=(16, 12))
            gs.GridSpec(4, 4)
            fig.tight_layout(rect=[0, 0.03, 1, 0.95])
            fig.suptitle(f"Exposure group {df_number} with {exposure}s \n"
                         f"Ranking: {C}")
            #Subplot for LC
            plt.subplot2grid((4, 4), (0, 0), colspan=4, rowspan=2)
            #Convert to JD here as well
            jd_t_mean = [
                gphoton_utils.calculate_jd(t + firsttime_mean) for t in t_mean
            ]
            plt.errorbar(jd_t_mean,
                         flux_bgsub,
                         yerr=flux_bgsub_err,
                         color=bandcolors[band],
                         marker='.',
                         ls='',
                         zorder=4,
                         label=band)
            plt.axhline(alpha=.3, ls='dotted', color=bandcolors[band])
            if len(redpoints) != 0:
                jd_t_mean_red = [
                    gphoton_utils.calculate_jd(t + firsttime_mean)
                    for t in t_mean_red
                ]
                plt.errorbar(jd_t_mean_red,
                             flux_bgsub_red,
                             yerr=flux_bgsub_err_red,
                             color='#808080',
                             marker='.',
                             ls='',
                             zorder=2,
                             alpha=.5,
                             label='Flagged')
            if len(bluepoints) != 0:
                jd_t_mean_blue = [
                    gphoton_utils.calculate_jd(t + firsttime_mean)
                    for t in t_mean_blue
                ]
                plt.errorbar(jd_t_mean_blue,
                             flux_bgsub_blue,
                             yerr=flux_bgsub_err_blue,
                             color='green',
                             marker='.',
                             ls='',
                             zorder=3,
                             alpha=.5,
                             label='SigmaClip')
            if other_band_exists:
                jd_t_mean_other = [
                    gphoton_utils.calculate_jd(t + firsttime_mean)
                    for t in t_mean_other
                ]
                plt.errorbar(jd_t_mean_other,
                             flux_bgsub_other,
                             yerr=flux_bgsub_err_other,
                             color=bandcolors[band_other],
                             marker='.',
                             ls='',
                             zorder=1,
                             label=band_other,
                             alpha=.25)

            ax = plt.gca()
            ax = WDutils.plotparams(ax)
            plt.title(f"{band} light curve")
            plt.xlabel('Time JD')
            plt.ylabel('Flux mmi')
            plt.legend(loc=1)
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])

            #Subplot for autocorr
            plt.subplot2grid((4, 4), (2, 2), colspan=1, rowspan=2)
            plt.plot(autocorr_result, 'b-', label='data')
            plt.title('Autocorrelation')
            plt.xlabel('Delay')
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            ax = plt.gca()
            ax = WDutils.plotparams(ax)

            #Subplot for periodogram
            plt.subplot2grid((4, 4), (2, 0), colspan=2, rowspan=2)
            ax = plt.gca()
            ax = WDutils.plotparams(ax)
            ax.plot(freq, amp, 'g-', label='Data')
            ax.plot(freq_detrad, amp_detrad, 'r-', label="Detrad", alpha=.25)
            ax.plot(freq_expt, amp_expt, 'b-', label="Exposure", alpha=.25)
            ax.set_title(f"{band} Periodogram")
            ax.set_xlabel('Freq [Hz]')
            ax.set_ylabel('Amplitude')
            ax.set_xlim(0, np.max(freq))
            try:
                ax.set_ylim(0, np.max(amp) * 2)
            except Exception:
                print("Issue with periodogram axes")

            top5amp_detrad = heapq.nlargest(5, amp_detrad)
            bad_detrad = pgram_tup.bad_detrad
            if any(np.isnan(x) for x in top5amp_detrad):
                print(f"No detrad peaks for exposure group {df_number}")
            else:
                for tup in bad_detrad:
                    ax.axvspan(tup[0], tup[1], alpha=.1, color='black')

            #ax[0][1].axvline(x=nyquistfreq, color='r', ls='--')
            for level in [.05]:
                ax.axhline(ls.false_alarm_level(level),
                           color='black',
                           alpha=.5,
                           ls='--',
                           label=f"FAP: {level}")
            ax.axhline(ls.false_alarm_level(.25),
                       color='black',
                       alpha=.5,
                       ls=':',
                       label='FAP: 0.25')

            ax.legend()
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])

            #Subplot for png image
            plt.subplot2grid((4, 4), (2, 3), colspan=1, rowspan=2)
            pngfile = (
                f"/home/dmrowan/WhiteDwarfs/GALEXphot/pngs/{source}.png")
            img1 = mpimg.imread(pngfile)
            plt.imshow(img1)
            #Turn off axes
            #ax[1][1].axis('off')
            plt.axis('off')
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])

            saveimagepath = f"PDFs/{source}-{band}qlp{df_number}.pdf"
            fig.savefig(saveimagepath)

            #Close figure
            fig.clf()
            plt.close('all')

        #Information for big light curve
        biglc_time.append(np.nanmean(t_mean + firsttime_mean))
        biglc_counts.append(np.nanmean(flux_bgsub))
        biglc_err.append(np.std(flux_bgsub_err) / np.sqrt(df_reduced.shape[0]))

        df_numbers_run.append(df_number)
        df_number += 1

    ###Find the total rank, best rank, and best group###
    totalrank = np.sum(c_vals)
    if len(c_vals) != 0:
        bestrank = max(c_vals)
        idx_best = np.where(np.array(c_vals) == bestrank)[0][0]
        best_expt_group = df_numbers_run[idx_best]
        c_ws_best = c_ws_vals[idx_best]
        c_magfit_best = c_magfit_vals[idx_best]
        c_ws_max = max(c_ws_vals)
        c_exp_max = max(c_exp_vals)
        c_pgram_max = max(c_pgram_vals)
    else:
        bestrank = 0
        idx_best = 0
        best_expt_group = 0
        c_ws_best = 0
        c_magfit_best = 0
        c_ws_max = 0
        c_exp_max = 0
        c_pgram_max = 0
    print(f"{source} Best Rank: {bestrank} in group {best_expt_group}")

    ###Get most prevalent period from strongest_periods_list###
    all_periods = [tup[0] for tup in strongest_periods_list]
    all_ratios = [tup[1] for tup in strongest_periods_list]
    if len(all_periods) > 1:
        period_to_save = all_periods[np.where(
            np.asarray(all_ratios) == max(all_ratios))[0][0]]
        best_fap = min(fap_list)
    elif len(all_periods) == 1:
        period_to_save = all_periods[0]
        period_to_save = round(period_to_save, 3)
        best_fap = min(fap_list)
    else:
        period_to_save = ''
        best_fap = ''

    #Generate output csv with pandas
    outputdic = {
        "SourceName": [source],
        "Band": [band],
        "TotalRank": [round(totalrank, 3)],
        "BestRank": [round(bestrank, 3)],
        "Comment": [""],
        "ABmag": [round(m_ab, 2)],
        "StrongestPeriod": [period_to_save],
        "False Alarm Prob.": [best_fap],
        "WS metric": [c_ws_best],
        "c_magfit": [c_magfit_best],
        "SimbadName": [simbad_name],
        "SimbadTypes": [simbad_types],
        "Spectype": [spectype],
        "KnownVariable": [variability],
        "Binarity": [binarity],
        "Hasdisk": [hasdisk],
        "c_ws_max": [c_ws_max],
        "c_exp_max": [c_exp_max],
        "c_pgram_max": [c_pgram_max],
    }
    dfoutput = pd.DataFrame(outputdic)
    dfoutput.to_csv(f"Output/{source}-{band}-output.csv", index=False)

    if makeplot:
        #####Generate multiplage pdf#####

        ###Page 1###
        #Drop flagged rows from alldata
        alldata_flag_bool_vals = [
            WDutils.badflag_bool(x) for x in alldata['flags']
        ]
        alldata_flag_idx = np.where(
            np.array(alldata_flag_bool_vals) == True)[0]
        alldata = alldata.drop(index=alldata_flag_idx)
        alldata = alldata.reset_index(drop=True)
        #Make the correction for relative scales
        alldata_tmean = alldata['t_mean']
        alldata_flux_bgsub = alldata['flux_bgsub']
        alldata_medianflux = np.nanmedian(alldata_flux_bgsub)
        alldata_flux_bgsub = (alldata_flux_bgsub / alldata_medianflux) - 1.0
        alldata_flux_bgsub_err = (alldata['flux_bgsub_err'] /
                                  alldata_medianflux)

        #Convert to JD
        alldata_jd_tmean = [
            gphoton_utils.calculate_jd(t) for t in alldata_tmean
        ]
        biglc_jd_time = [gphoton_utils.calculate_jd(t) for t in biglc_time]
        if other_band_exists:
            alldata_jd_tmean_other = [
                gphoton_utils.calculate_jd(t) for t in alldata_tmean_other
            ]

        #See if ASASSN data exists:
        if type(bigcatalog['ASASSNname'][bigcatalog_idx]) != str:
            asassn_exists = False
        else:
            asassn_exists = True
            asassn_name = bigcatalog['ASASSNname'][bigcatalog_idx]

        #Plot ASASSN data
        if asassn_exists:
            print("ASASSN data exists")
            figall = plt.figure(figsize=(16, 12))
            gs.GridSpec(2, 2)
            figall.tight_layout(rect=[0, .03, 1, .95])
            #Plot total light curve
            plt.subplot2grid((2, 2), (0, 0), colspan=2, rowspan=1)
            plt.errorbar(biglc_jd_time,
                         biglc_counts,
                         yerr=biglc_err,
                         color=bandcolors[band],
                         marker='.',
                         ls='-',
                         zorder=3,
                         ms=15,
                         label=band)
            plt.errorbar(alldata_jd_tmean,
                         alldata_flux_bgsub,
                         yerr=alldata_flux_bgsub_err,
                         color='black',
                         marker='.',
                         zorder=2,
                         ls='',
                         alpha=.125)
            plt.xlabel('Time [s]')
            plt.ylabel('Relative Counts per Second')
            #Plot data in other band
            if other_band_exists:
                print(f"Plotting additional LC data for {band_other} band")
                plt.errorbar(alldata_jd_tmean_other,
                             alldata_flux_bgsub_other,
                             yerr=alldata_flux_bgsub_err_other,
                             color=bandcolors[band_other],
                             marker='.',
                             ls='',
                             zorder=1,
                             alpha=.25,
                             label=band_other)
            plt.xlabel('Time [s]')
            plt.ylabel('Flux MMI')
            plt.legend()

            #Plot ASASSN data
            plt.subplot2grid((2, 2), (1, 0), colspan=1, rowspan=1)
            axASASSN_LC = plt.gca()
            axASASSN_LC = WDutils.plotASASSN_LC(axASASSN_LC, asassn_name)
            axASASSN_LC = WDutils.plotparams(axASASSN_LC)

            plt.subplot2grid((2, 2), (1, 1), colspan=1, rowspan=1)
            axASASSN_pgram = plt.gca()
            axASASSN_pgram = WDutils.plotASASSN_pgram(axASASSN_pgram,
                                                      asassn_name)
            axASASSN_pgram = WDutils.plotparams(axASASSN_pgram)

        else:
            figall, axall = plt.subplots(1, 1, figsize=(16, 12))
            figall.tight_layout(rect=[0, 0.03, 1, 0.95])
            #Plot total light curve
            axall.errorbar(biglc_jd_time,
                           biglc_counts,
                           yerr=biglc_err,
                           color=bandcolors[band],
                           marker='.',
                           ls='-',
                           zorder=3,
                           ms=15,
                           label=band)
            axall.errorbar(alldata_jd_tmean,
                           alldata_flux_bgsub,
                           yerr=alldata_flux_bgsub_err,
                           color='black',
                           marker='.',
                           zorder=2,
                           ls='',
                           alpha=.125)
            #Plot data in other band
            if other_band_exists:
                print(f"Plotting additional LC data for {band_other} band")
                axall.errorbar(alldata_jd_tmean_other,
                               alldata_flux_bgsub_other,
                               yerr=alldata_flux_bgsub_err_other,
                               color=bandcolors[band_other],
                               marker='.',
                               ls='',
                               zorder=1,
                               alpha=.25,
                               label=band_other)
            axall.set_xlabel('Time [s]')
            axall.set_ylabel('Flux MMI')
            axall.legend()

        #Supertitle
        figall.suptitle(
            f"Combined Light curve for {source} in {band} \n"
            f"Best rank {round(bestrank, 2)} in df {best_expt_group} \n"
            f"Total rank {round(totalrank, 2)} in {len(data)} groups")

        all1saveimagepath = f"PDFs/{source}-{band}all1.pdf"
        figall.savefig(all1saveimagepath)
        #Clear figure
        figall.clf()
        plt.close('all')

        ###Page 2### Magnitude sigma plot and Source information
        #Get info from sigmamag csv file (from WDsigmamag)
        figall2, axall2 = plt.subplots(2, 1, figsize=(16, 12))
        figall2.tight_layout(rect=[0, 0.03, 1, 0.95])
        if band == 'NUV':
            df_sigmamag = pd.read_csv(sigmamag_path_NUV)
        else:
            assert (band == 'FUV')
            df_sigmamag = pd.read_csv(sigmamag_path_FUV)
        #Pull values, weights
        allmags = df_sigmamag['m_ab']
        allsigma = df_sigmamag['sigma_m']
        df_alphas = df_sigmamag['weight']
        rgb_1 = np.zeros((len(df_alphas), 4))
        rgb_1[:, 3] = df_alphas
        #Create magnitude bins using np.digitize
        axall2[0].scatter(allmags, allsigma, color=rgb_1, zorder=1, s=5)

        #Get information from magdic
        sourcemags = np.array(magdic['mag'])
        sourcesigmas = np.array(magdic['sigma'])
        sourcealphas = np.array(magdic['weight'])
        #Make lists for arrow points (above .3 sigma)
        arrow_mag = []
        arrow_sigma = []
        arrow_alpha = []
        idx_arrow = np.where(sourcesigmas > .3)[0]
        for idx in idx_arrow:
            arrow_mag.append(sourcemags[idx])
            arrow_sigma.append(.29)
            arrow_alpha.append(sourcealphas[idx])

        #Drop these indicies from the source arrays
        sourcemags = np.delete(sourcemags, idx_arrow)
        sourcesigmas = np.delete(sourcesigmas, idx_arrow)
        sourcealphas = np.delete(sourcealphas, idx_arrow)

        #Make color code information
        rgb_2 = np.zeros((len(sourcealphas), 4))
        rgb_2[:, 0] = 1.0
        rgb_2[:, 3] = sourcealphas

        #Make color code information for arrow
        rgb_arrow = np.zeros((len(arrow_alpha), 4))
        rgb_arrow[:, 0] = .3
        rgb_arrow[:, 1] = .7
        rgb_arrow[:, 2] = 1.0
        rgb_arrow[:, 3] = arrow_alpha

        axall2[0].scatter(sourcemags, sourcesigmas, color=rgb_2, zorder=2)
        axall2[0].scatter(arrow_mag,
                          arrow_sigma,
                          color=rgb_arrow,
                          marker="^",
                          zorder=3)
        axall2[0].set_title("Sigma as a function of AB mag")
        axall2[0].set_xlabel("AB mag")
        axall2[0].set_ylabel("Sigma")
        axall2[0].set_ylim(ymin=0)
        axall2[0].set_ylim(ymax=.3)
        axall2[0].set_xlim(xmin=13)
        axall2[0].set_xlim(xmax=23)

        ###Information for text subplot
        axall2[1].set_ylim(ymin=0, ymax=1)
        information1 = """
        Source name:         \n
        Band:                \n
        ABMagnitude:         \n
        g Magnitude:         \n
        Spectral Type:       \n
        SIMBAD Designation:  \n
        SIMBAD Type list:    \n
        Known Variability:   \n
        Known Binarity:      \n
        Has Disk:            \n
        Strongest Period:    \n
        """
        information2 = (f"{source} \n"
                        f"{band} \n"
                        f"{round(m_ab, 4)} \n"
                        f"{round(gmag, 4)} \n"
                        f"{spectype} \n"
                        f"{simbad_name} \n"
                        f"{simbad_types}"
                        f"{variability} \n"
                        f"{binarity} \n"
                        f"{hasdisk} \n"
                        f"{period_to_save} \n")
        axall2[1].text(.2, 1, information1, size=15, ha='left', va='top')
        axall2[1].text(.7, 1, information2, size=15, ha='right', va='top')
        axall2[1].axis('off')

        all2saveimagepath = f"PDFs/{source}-{band}all2.pdf"
        figall2.savefig(all2saveimagepath)

        #Clear figure
        figall2.clf()
        plt.close('all')

        #Generate PDF
        subprocess.run(['PDFcreator', '-s', source, '-b', band])
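
# Hypothetical invocation (editor's addition): the filename parsing above
# expects per-source light curves named '<source>-NUV.csv' or '<source>-FUV.csv',
# and the source name itself must not contain the letter 'c'.
if __name__ == '__main__':
    main('WD-1234+567-NUV.csv', makeplot=False)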
Example #20
#t,A,B,C,D,E,F,dyy = np.loadtxt('methanol_peaks02.csv', unpack = True)
B, t = np.loadtxt('merged-file', unpack=True)

#ts = data[1:,i]
m = len(t)
dy = np.full(m, 0.1)
p0 = [10, -0.001]
popt, pcov = curve_fit(func, t, B, p0)
#print(popt)
fitfunc2 = func(t, popt[0], popt[1])
tsnew = B - fitfunc2  # detrend the series before the periodogram
ls = LombScargle(t, tsnew, dy)
freq, power = ls.autopower()
#fileout = open("merged-file", "w")  # disabled: would truncate the input file just read, and is never used
print(1.0e0 / freq[np.argmax(power)])
print(freq[np.argmax(power)])
pmax = (max(power))
#print(ls.false_alarm_probability(power.max()))
probabilities = [0.1, 0.05, 0.001]
#plevel = (ls.false_alarm_level(probabilities))
#plt.axes().set_aspect('1')
plt.clf()
plt.cla()
plt.xlim(wmin, wmax)  # wmin/wmax assumed defined elsewhere in the script
#plt.ylim(15,52)

plt.xlabel('Frequency (day$^{-1}$)', fontsize=18)
Example #21
def kepwindow(infile,
              outfile=None,
              datacol='SAP_FLUX',
              nyqfactor=0.01,
              plot=False,
              noninteractive=False,
              overwrite=False,
              verbose=False,
              logfile='kepwindow.log'):
    """
    kepwindow -- Calculate and store the window function for a Kepler time
    series

    Kepler time stamps are not perfectly uniform. There are gaps in the data due
    to operational pauses and issues, and timestamps are corrected to the
    barycenter of the solar system. The size of the barycenter correction is
    time-dependent. kepwindow calculates a discrete window function for a
    user-provided Kepler time series. This is calculated using a Lomb-Scargle
    periodogram. The result is stored in a new FITS file that is a direct copy
    of the input file but with an additional table extension containing the
    window function.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing a Kepler light
        curve within the first data extension.
    outfile : str
        The name of the output FITS file with a new extension containing the
        window function.
    datacol : str
        The name of the FITS table column in extension 1 of infile to which
        the window function should be coupled. While the window function
        ostensibly requires only the timing information, this column is needed
        so that the task can search the datacol array for bad data such as
        instances of NaN. These will be rejected before the window function is
        calculated.
    nyqfactor : float
        The number of Nyquist factors up to which to evaluate. Kepler data are
        fairly regular, so the default will usually encompass most of the
        window.
    plot : bool
        Plot the output window function?
    noninteractive : bool
        If True, prevents the matplotlib window from popping up.
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------
    .. code-block:: bash

        $ kepwindow kplr002436324-2009259160929_llc.fits --datacol SAP_FLUX
        --nyqfactor 0.02 --plot --verbose

    .. image:: ../_static/images/api/kepwindow.png
        :align: center
    """

    if outfile is None:
        outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
    ## log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPWINDOW -- ' + ' infile={}'.format(infile) +
            ' outfile={}'.format(outfile) + ' datacol={}'.format(datacol) +
            ' nyqfactor={}'.format(nyqfactor) + ' plot={}'.format(plot) +
            ' noninteractive={}'.format(noninteractive) +
            ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) +
            ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call + '\n', verbose)

    ## start time
    kepmsg.clock('KEPWINDOW started at', logfile, verbose)
    ## overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = ('ERROR -- KEPWINDOW: {} exists. Use overwrite=True'.format(
            outfile))
        kepmsg.err(logfile, errmsg, verbose)

    ## open input file
    instr = pyfits.open(infile)
    tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
                                                    verbose)
    try:
        barytime = instr[1].data.field('barytime')
    except Exception:
        barytime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
                                     verbose)
    barytime = barytime[np.isfinite(barytime)]
    # A constant y-series gives the spectral window function of the time sampling
    ls = LombScargle(barytime, 1, center_data=False, fit_mean=False)
    freqW, powerW = ls.autopower(nyquist_factor=nyqfactor)
    # Mirror about zero frequency so the window is symmetric
    freqW = np.append(-freqW[::-1], freqW)
    powerW = np.append(powerW[::-1], powerW)
    freqW, powerW = np.sort(freqW), powerW[np.argsort(freqW)]
    freqW, powerW = freqW[np.isfinite(powerW)], powerW[np.isfinite(powerW)]

    plt.figure()
    plt.axes([0.06, 0.113, 0.93, 0.86])
    plt.plot(freqW, powerW, color='#363636', linestyle='-', linewidth=1.0)
    plt.fill(freqW, powerW, color='#a8a7a7', linewidth=0.0, alpha=0.2)
    plt.xlabel(r'Frequency (d$^{-1}$)', {'color': 'k'})
    plt.ylabel('Power', {'color': 'k'})
    plt.grid()
    plt.savefig(re.sub('.fits', '.png', outfile), bbox_inches='tight')
    if not noninteractive:
        plt.show()

    col1 = pyfits.Column(name='FREQUENCY',
                         format='E',
                         unit='1/day',
                         array=freqW)
    col2 = pyfits.Column(name='POWER', format='E', array=powerW)
    cols = pyfits.ColDefs([col1, col2])
    instr.append(pyfits.BinTableHDU.from_columns(cols))
    instr[-1].header['EXTNAME'] = ('WINDOW FUNCTION', 'extension name')
    kepmsg.log(logfile, "Writing output file {}...".format(outfile), verbose)

    ## comment keyword in output file
    kepkey.comment(call, instr[0], outfile, logfile, verbose)
    instr.writeto(outfile)

    ## close input file
    instr.close()
    kepmsg.clock('KEPWINDOW completed at', logfile, verbose)
    return
Example #22
    def lsfit(self, iterations=100, plot=False):
        print(f"Fitting {self.filename}")
        if not self.timereset:
            self.reset_time()
        # Drop colored points from main band data
        coloredtup = WDutils.ColoredPoints(self.df)
        redpoints = coloredtup.redpoints
        bluepoints = coloredtup.bluepoints
        droppoints = np.unique(np.concatenate([redpoints, bluepoints]))
        df_reduced = self.df.drop(index=droppoints)
        df_reduced = df_reduced.reset_index(drop=True)
        df_reduced = WDutils.df_firstlast(df_reduced)
        # Rescale on percentage scale
        relativetup = WDutils.relativescales(df_reduced)
        t_mean = np.array(relativetup.t_mean)
        flux_bgsub = np.array(relativetup.flux)
        flux_bgsub_err = np.array(relativetup.err)
        # Want time in seconds, rather than minutes
        time_seconds = t_mean * 60

        # Construct LS periodogram to get period guess
        ls = LombScargle(t_mean, flux_bgsub)
        freq, amp = ls.autopower(nyquist_factor=1)
        freq_max = freq[np.argmax(amp)]
        period_guess = (1 / freq_max) * 60

        # Reduction for other band
        if self.band == 'NUV':
            if self.FUVexists():
                othertup = self.FUVmatch(scale=100)
            else:
                othertup = None
        else:
            if self.NUVexists():
                othertup = self.NUVmatch(scale=100)
            else:
                othertup = None
        if othertup is not None:
            if len(othertup.t_mean) > 15:
                flux_bgsub_other = othertup.flux
                flux_bgsub_err_other = othertup.err
                time_seconds_other = np.array(othertup.t_mean) * 60
                other_band_exists = True
            else:
                other_band_exists = False
        else:
            other_band_exists = False

        # Guess parameters, same between bands except for amplitude
        guess_mean = np.mean(flux_bgsub)
        guess_phase = 0
        guess_freq = (2 * np.pi) / period_guess
        guess_amp = np.max(flux_bgsub)
        if other_band_exists:
            guess_amp_other = np.max(flux_bgsub_other)

        lower_bounds = np.array([0, guess_freq - (1 / 800), -np.inf, -50])
        upper_bounds = np.array([np.inf, guess_freq + (1 / 800), np.inf, 50])
        bounds = (lower_bounds, upper_bounds)
        # Fitting for main band
        data_guess = (
            guess_amp * np.sin(time_seconds * guess_freq + guess_phase) +
            guess_mean)
        amp_list = []
        p_list = []
        pbar = ProgressBar()
        for i in pbar(range(iterations)):
            bs_flux = []
            for ii in range(len(flux_bgsub)):
                val = flux_bgsub[ii]
                err = flux_bgsub_err[ii]
                bs_flux.append(np.random.normal(val, err))

            optimize_func = lambda x: x[0] * np.sin(x[1] * time_seconds + x[2]
                                                    ) + x[3] - bs_flux

            out = least_squares(
                optimize_func,
                [guess_amp, guess_freq, guess_phase, guess_mean],
                bounds=bounds)
            est_amp, est_freq, est_phase, est_mean = out.x

            amp_list.append(est_amp)
            p_list.append((2 * np.pi) / est_freq)

        # Identical fitting process for other band
        if other_band_exists:
            amp_list_other = []
            p_list_other = []
            pbar2 = ProgressBar()
            for i in pbar2(range(iterations)):
                bs_flux_other = []
                for ii in range(len(flux_bgsub_other)):
                    val = flux_bgsub_other[ii]
                    err = flux_bgsub_err_other[ii]
                    bs_flux_other.append(np.random.normal(val, err))

                optimize_func = lambda x: (
                    x[0] * np.sin(x[1] * time_seconds_other + x[2]) + x[3] -
                    bs_flux_other)

                out_other = least_squares(
                    optimize_func,
                    [guess_amp_other, guess_freq, guess_phase, guess_mean],
                    bounds=bounds)
                est_amp_other, est_freq_other, est_phase_other, est_mean_other = out_other.x
                amp_list_other.append(est_amp_other)
                p_list_other.append((2 * np.pi) / est_freq_other)

        # Plotting option
        if plot:
            fig, ax = plt.subplots(1, 1, figsize=(8, 4))

            fine_t = np.arange(min(time_seconds), max(time_seconds), 0.1)
            data_fit = est_amp * np.sin(est_freq * fine_t +
                                        est_phase) + est_mean

            colors = {'NUV': 'xkcd:red', 'FUV': 'xkcd:azure'}

            markers, caps, bars = ax.errorbar(time_seconds,
                                              flux_bgsub,
                                              yerr=flux_bgsub_err,
                                              color=colors[self.band],
                                              alpha=.75,
                                              marker='.',
                                              ls='',
                                              ms=10)
            [bar.set_alpha(.25) for bar in bars]

            if other_band_exists:
                ax2 = ax.twinx()
                ax2.minorticks_on()
                ax2.tick_params(direction='in', which='both', labelsize=15)
                markers2, caps2, bars2 = ax2.errorbar(
                    time_seconds_other,
                    flux_bgsub_other,
                    yerr=flux_bgsub_err_other,
                    color=colors[self.otherband],
                    alpha=.5,
                    marker='.',
                    ls='',
                    ms=8)
                [bar.set_alpha(.25) for bar in bars2]

                ax2.tick_params(axis='y',
                                colors=colors[self.otherband],
                                which='both')

                for axis in ['top', 'bottom', 'left', 'right']:
                    ax2.spines[axis].set_linewidth(1.5)

            ax = WDutils.plotparams(ax)

            ax.plot(time_seconds, data_guess, label='first guess')
            ax.plot(fine_t, data_fit, label='after fitting')
            ax.legend()
            plt.show()

            fig.savefig(f"{self.filename}-fit.pdf")

        # Save output in a named tuple
        OutputTup = collections.namedtuple("OutputTup", [
            'NUVperiod', 'NUVperr', 'NUVamp', 'NUVaerr', 'FUVperiod',
            'FUVperr', 'FUVamp', 'FUVaerr'
        ])
        if self.band == 'NUV':
            if (other_band_exists
                    and (abs(np.mean(p_list) - np.mean(p_list_other)) < 150)):
                tup = OutputTup(np.mean(p_list), np.std(p_list),
                                abs(np.mean(amp_list)), np.std(amp_list),
                                np.mean(p_list_other), np.std(p_list_other),
                                abs(np.mean(amp_list_other)),
                                np.std(amp_list_other))
            else:
                tup = OutputTup(np.mean(p_list), np.std(p_list),
                                abs(np.mean(amp_list)), np.std(amp_list), None,
                                None, None, None)
        else:
            if (other_band_exists
                    and (abs(np.mean(p_list) - np.mean(p_list_other)) < 150)):
                tup = OutputTup(np.mean(p_list_other), np.std(p_list_other),
                                abs(np.mean(amp_list_other)),
                                np.std(amp_list_other), np.mean(p_list),
                                np.std(p_list), abs(np.mean(amp_list)),
                                np.std(amp_list))
            else:
                tup = OutputTup(None, None, None, None, np.mean(p_list),
                                np.std(p_list), abs(np.mean(amp_list)),
                                np.std(amp_list))
        return tup
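The bootstrap loop in lsfit() is the core of the error estimate: each iteration perturbs every flux point within its own uncertainty, refits the sine, and the scatter of the fitted parameters over iterations gives the quoted errors. A minimal self-contained sketch of that idea, on synthetic data rather than the pipeline's (all names and values here are illustrative):

import numpy as np
from scipy.optimize import least_squares

rng = np.random.default_rng(0)
t = np.linspace(0, 1500, 300)                 # time in seconds
true_freq = 2 * np.pi / 600.0                 # a 600 s period
flux = 5.0 * np.sin(true_freq * t) + rng.normal(0, 1.0, t.size)
err = np.full_like(t, 1.0)

def residuals(x, time, data):
    # x = (amplitude, angular frequency, phase, mean offset)
    return x[0] * np.sin(x[1] * time + x[2]) + x[3] - data

amps, periods = [], []
for _ in range(100):
    resampled = rng.normal(flux, err)         # perturb each point by its error
    fit = least_squares(residuals, [4.0, true_freq, 0.0, 0.0],
                        args=(t, resampled))
    amps.append(fit.x[0])
    periods.append(2 * np.pi / fit.x[1])

print(f"period    = {np.mean(periods):.1f} +/- {np.std(periods):.1f} s")
print(f"amplitude = {np.mean(amps):.2f} +/- {np.std(amps):.2f}")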
Example #23
0
def LSfindP(file_name,
            data,
            plots=False,
            file_type='.png',
            min_period=0.1,
            max_period=30,
            root_dir='/Volumes/Zoe Bell Backup/'):
    '''
    Takes the name of a .fits file (without the extension) from the Everest K2
    data, along with the data from that file read into a table. Optional
    arguments control whether a plot is saved, the file type it is saved as,
    the minimum and maximum periods to search, and the root directory in which
    to save the plot (inside the folder LSPlotOutputs).
    Uses the Lomb-Scargle periodogram to return the file name, the period that
    best fits the corrected flux data in that range, the power at that period,
    and the associated false alarm probability.
    '''
    #start = file_name.rfind('/') + 1
    #name = file_name[start:-5]
    name = file_name

    #data = Table.read(file_name, format='fits')
    ok = np.where((data['QUALITY'] == 0) & (np.isfinite(data['TIME']))
                  & (np.isfinite(data['FCOR'])
                     & (np.isfinite(data['FRAW_ERR']))))

    t = np.array(data['TIME'][ok])
    fcor = np.array(data['FCOR'][ok])
    frawErr = np.array(data['FRAW_ERR'][ok])

    ls = LombScargle(t, fcor, frawErr)
    freq, power = ls.autopower(minimum_frequency=1 / max_period,
                               maximum_frequency=1 / min_period)
    best_freq = freq[np.argmax(power)]
    max_power = np.max(power)

    if (plots):
        plt.figure(figsize=(10, 7))

        plt.subplot(211)
        plt.plot(1 / freq, power)
        plt.title('Periodogram')
        plt.xlabel('Period')
        plt.ylabel('Power')
        plt.annotate('best period',
                     xy=(1 / best_freq, max_power),
                     xytext=(1 / best_freq * 0.5, max_power * 0.9),
                     arrowprops=dict(facecolor='black', width=1, headwidth=5))

        plt.subplot(212)
        t_fit = np.linspace(np.min(t),
                            np.max(t))  # could instead just take the first and last points
        f_fit = LombScargle(t, fcor, frawErr).model(t_fit, best_freq)
        plt.plot(t, fcor)
        plt.plot(t_fit, f_fit)
        plt.title('Comparison of Data and Model')
        plt.xlabel('Time')
        plt.ylabel('Flux')

        plt.suptitle(name)
        plt.tight_layout()
        plt.subplots_adjust(top=0.9)
        plt.savefig(root_dir + 'LSPlotOutputs/' + name + file_type, dpi=150)
        plt.close()

    return [
        name, 1 / best_freq, max_power,
        ls.false_alarm_probability(max_power)
    ]
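A hypothetical call to LSfindP, assuming an Everest light curve file exists locally (the file name below is a placeholder, not from the original):

# from astropy.table import Table
# data = Table.read('everest_target.fits', format='fits')
# name, best_period, peak_power, fap = LSfindP('everest_target', data,
#                                              plots=False, min_period=0.1,
#                                              max_period=30)
# print(name, best_period, peak_power, fap)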
print("There are: " + str(len(star_flux)))
print("Begin: " + str(np.min(star_time)))
print("End: " + str(np.max(star_time)))


ls = LombScargle(star_time, star_flux, star_flux_err)

if len(sys.argv) > 2:
    min_period = float(sys.argv[1])
    max_period = float(sys.argv[2])
else:
    print("You should specify the min and max periods. "
          "The default is being used (0.1:50 days)")
    min_period = 0.1
    max_period = 50

freq, PLS = ls.autopower(minimum_frequency=1./max_period,
                         maximum_frequency=1./min_period)

best_freq = freq[np.argmax(PLS)]

phase = (star_time * best_freq) % 1

#Compute the best-fit model
phase_fit = np.linspace(0, 1)
mag_fit = ls.model(t=phase_fit / best_freq,
                   frequency=best_freq)
period = 1./best_freq

#Set up the figure & axes for plotting
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
fig.suptitle('Lomb-Scargle Periodogram (period = ' + str(period) + ' days)')
fig.subplots_adjust(bottom=0.12, left=0.07, right=0.95)
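The snippet above stops right after the figure is set up. A plausible completion, using only the variables already defined (this is a guess at the missing plotting code, not the original):

ax[0].plot(1. / freq, PLS, color='black')
ax[0].set(xlabel='Period (days)', ylabel='Lomb-Scargle power')

ax[1].errorbar(phase, star_flux, yerr=star_flux_err,
               fmt='.', alpha=0.5)
ax[1].plot(phase_fit, mag_fit, color='black')
ax[1].set(xlabel='Phase', ylabel='Flux')
plt.show()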
Example #25
0
import scipy.io as sio
import matplotlib.pyplot as plt

mat_contents = sio.loadmat(
    '/home/aldo/Documents/Projects/Avtivemeter/Data/code_cruze_paper/Re actimetry/data_acti_4days.mat'
)

t = mat_contents['tt_4']
y = mat_contents['yy_4']

t = t.flatten()
y = y.flatten()
dy = 0.1

ls = LombScargle(t, y, dy)
freq, power = ls.autopower()
print(power.max())
print(ls.false_alarm_probability(power.max()))

plt.plot(freq, power)

best_frequency = freq[np.argmax(power)]  # autopower above returned 'freq', not 'frequency'
t_fit = np.linspace(0, 1)
y_fit = LombScargle(t, y, dy).model(t_fit, best_frequency)

print("Now using fastchi2")
frequency, power = LombScargle(t, y).autopower(method='fastchi2')

print(power.max())
print(ls.false_alarm_probability(power.max()))
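One caveat with the model evaluation above: t_fit spans only [0, 1], which need not overlap the actimetry time axis. To overplot the model on the data, one would usually evaluate it across the observed baseline, e.g. (a sketch, not part of the original):

# t_fit = np.linspace(t.min(), t.max(), 1000)
# y_fit = LombScargle(t, y, dy).model(t_fit, best_frequency)
# plt.plot(t, y, '.', alpha=0.3)
# plt.plot(t_fit, y_fit)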
Example #26
0
#pycs.gen.lc.display([lc], [spline])

#x = -0.5 + np.random.rand(1000)
#f = np.sin(10 * 2 * np.pi * x) + np.sin(15 * 2 * np.pi * x) + 20*np.sin(5 * 2 * np.pi * x)+ np.sin(100 * 2 * np.pi * x)

#k = -125 + np.arange(250)
#f_k = ndft(x, f, len(k))
'''
k = np.linspace(-1/(mjhd[1]-mjhd[0]),1/(mjhd[1]-mjhd[0]), n_freq)
new_k = np.linspace(0,1/(mjhd[1]-mjhd[0]), n_freq)
f_k=ndft(mjhd, mag_ml, len(k))
f,Pxx = periodogram(new_magml, sampling)
'''

ls = LombScargle(mjhd, mag_ml, err_mag_ml)
frequency, power = ls.autopower(minimum_frequency=0, maximum_frequency=1 / 50)
print(frequency)
print(power)
p_schuster = schuster_periodogram(mjhd, mag_ml, frequency)
#f,Pxx = periodogram(new_magml, sampling)

period_days = 1. / frequency
period_hours = period_days * 24

best_period = period_days[np.argmax(power)]
phase = (mjhd / best_period) % 1  # fold times at the best period

fig, ax = plt.subplots(1, 2, figsize=(14, 5))
ax2 = ax[0].twinx()

ax[0].plot(frequency, power, '-k', label="Lomb-Scargle")
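schuster_periodogram() is not defined in this snippet. A minimal implementation consistent with how it is called (times, values, and a trial frequency grid in; power out) would be the classical Schuster estimate; the author's exact form is an assumption:

import numpy as np

def schuster_periodogram(t, y, freq):
    t = np.asarray(t, dtype=float)
    y = np.asarray(y, dtype=float) - np.mean(y)
    # P(f) = |sum_j y_j exp(-2*pi*i*f*t_j)|^2 / N on the trial grid
    phases = np.exp(-2j * np.pi * np.outer(freq, t))
    return np.abs(phases @ y) ** 2 / len(t)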
Example #27
0
    def from_lightcurve(lc: LightCurve,
                        f_min=None,
                        f_max=None,
                        remove_ranges: List[Tuple[float]] = None,
                        samples_per_peak=10):
        """
        Computes a periodogram from a LightCurve object and normalizes it according to Parseval's theorem. The
        result therefore reflects the physical values in the light curve and shares its units. A Periodogram object is returned.

        It also has a possibility to remove certain ranges from the periodogram.
        :param lc: Lightcurve object
        :param f_min: Lower range for the periodogram
        :param f_max: Upper range for the periodogram
        :param remove_ranges: List of tuples, that represent areas in the periodogram that are ignored. These are
        removed from the periodogram
        :param samples_per_peak: number of samples per peak
        :return: Periodogram object
        """

        try:
            time = lc.time.value
        except AttributeError:
            time = lc.time

        try:
            flux = lc.flux.value
        except AttributeError:
            flux = lc.flux

        # compute the Nyquist frequency from the plain time array so that a
        # Quantity-valued lc.time cannot leak units into the comparison below
        nyquist = 1 / (2 * np.median(np.diff(time)))

        ls = LombScargle(time, flux, normalization='psd')

        if f_max is not None and f_max > nyquist:
            # assumes `import warnings` at module level
            warnings.warn("f_max exceeds the Nyquist frequency; the "
                          "periodogram may be unreliable above it.")

        if f_min is None:
            f_min = 0

        if f_max is None:
            f_max = nyquist

        f, p = ls.autopower(minimum_frequency=f_min,
                            maximum_frequency=f_max,
                            samples_per_peak=samples_per_peak,
                            nyquist_factor=1)

        # normalization of psd in order to get good amplitudes
        p = np.sqrt(4 / len(time)) * np.sqrt(p)

        # removing first item
        p = p[1:]
        f = f[1:]

        if remove_ranges is not None:
            mask_list = []
            for r in remove_ranges:
                mask = f < r[0]
                mask = np.logical_or(mask, f > r[1])
                mask_list.append(mask)

            combined_mask = mask_list[0]
            if len(mask_list) > 1:
                for m in mask_list[1:]:
                    combined_mask = np.logical_and(combined_mask, m)

            f = f[combined_mask]
            p = p[combined_mask]

        return Periodogram(f * (1 / cds.d),
                           p * cds.ppm,
                           nyquist=nyquist,
                           targetid=lc.targetid)
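A hypothetical usage with a lightkurve LightCurve (the package, target, and frequency range below are assumptions; from_lightcurve itself only needs an object with time, flux, and targetid attributes):

# import lightkurve as lk
# lc = lk.search_lightcurve('KIC 8006161', mission='Kepler')[0].download()
# pg = Periodogram.from_lightcurve(lc.normalize(), f_min=1, f_max=100,
#                                  remove_ranges=[(45, 55)])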
Example #28
0
def main(kc19_groupid=113, Tmag_cutoff=14, clean_gaia_cache=False):

    #
    # get info needed to query gaia for comparison stars
    #
    source_df = pd.read_csv('../data/kounkel_table1_sourceinfo.csv')
    sdf = source_df[(source_df['Tmag_pred'] < Tmag_cutoff)
                    & (source_df['group_id'] == kc19_groupid)]
    n_sel_sources_in_group = len(sdf)

    df2 = pd.read_csv('../data/string_table2.csv')

    gdf = df2[df2['group_id'] == kc19_groupid]

    group_coord = SkyCoord(float(gdf['l']) * u.deg,
                           float(gdf['b']) * u.deg,
                           frame='galactic')
    ra = group_coord.icrs.ra
    dec = group_coord.icrs.dec
    plx_mas = float(gdf['parallax'])

    #
    # define relevant directories / paths
    #
    gaiadir = os.path.join(basedir, 'gaia_queries')
    if not os.path.exists(gaiadir):
        os.mkdir(gaiadir)

    outfile = os.path.join(
        gaiadir, 'group{}_comparison_sample.xml.gz'.format(kc19_groupid))

    #
    # run the gaia query. require the same cuts imposed by Kounkel & Covey 2019
    # on stellar quality. also require close on-sky (within 5 degrees of KC19
    # group position), and close in parallax space (within +/-20% of KC19
    # parallax).
    #
    if clean_gaia_cache and os.path.exists(outfile):
        os.remove(outfile)

    if not os.path.exists(outfile):

        Gaia.login(credentials_file=os.path.join(homedir, '.gaia_credentials'))

        jobstr = ('''
        SELECT *
        FROM gaiadr2.gaia_source
        WHERE 1=CONTAINS(
          POINT('ICRS', ra, dec),
            CIRCLE('ICRS', {ra:.8f}, {dec:.8f}, {sep_deg:.1f}))
        AND parallax < {plx_upper:.2f} AND parallax > {plx_lower:.2f}
        AND parallax > 1
        AND parallax_error < 0.1
        AND 1.0857/phot_g_mean_flux_over_error < 0.03
        AND astrometric_sigma5d_max < 0.3
        AND visibility_periods_used > 8
        AND (
                (astrometric_excess_noise < 1)
                OR
                (astrometric_excess_noise > 1 AND astrometric_excess_noise_sig < 2)
        )
        ''')

        query = jobstr.format(sep_deg=5.0,
                              ra=ra.value,
                              dec=dec.value,
                              plx_upper=1.3 * plx_mas,
                              plx_lower=0.7 * plx_mas)

        if not os.path.exists(outfile):
            print(42 * '-')
            print('launching\n{}'.format(query))
            print(42 * '-')
            j = Gaia.launch_job(query=query,
                                verbose=True,
                                dump_to_file=True,
                                output_file=outfile)

        Gaia.logout()

    vot = parse(outfile)
    tab = vot.get_first_table().to_table()
    field_df = tab.to_pandas()

    #
    # require the same Tmag cutoff for the nbhd stars. ensure no overlap w/
    # sample of stars from the group itself. then randomly sample the
    # collection of stars.
    #

    Tmag_pred = (
        field_df['phot_g_mean_mag'] - 0.00522555 *
        (field_df['phot_bp_mean_mag'] - field_df['phot_rp_mean_mag'])**3 +
        0.0891337 *
        (field_df['phot_bp_mean_mag'] - field_df['phot_rp_mean_mag'])**2 -
        0.633923 *
        (field_df['phot_bp_mean_mag'] - field_df['phot_rp_mean_mag']) +
        0.0324473)

    field_df['Tmag_pred'] = Tmag_pred

    sfield_df = field_df[field_df['Tmag_pred'] < Tmag_cutoff]
    common = sfield_df.merge(sdf, on='source_id', how='inner')
    sfield_df = sfield_df[~sfield_df.source_id.isin(common.source_id)]

    n_field = len(sfield_df)

    if 2 * n_sel_sources_in_group > n_field:
        errmsg = (
            'ngroup: {}. nfield: {}. plz tune gaia query to get >2x the stars'.
            format(n_sel_sources_in_group, n_field))
        raise AssertionError(errmsg)

    srfield_df = sfield_df.sample(n=n_sel_sources_in_group)

    #
    # now given the gaia ids, get the rotation periods
    #
    for ix, r in srfield_df.iterrows():

        source_id = np.int64(r['source_id'])
        ra, dec = float(r['ra']), float(r['dec'])
        group_id = kc19_groupid
        name = str(gdf['name'].iloc[0])

        c_obj = SkyCoord(ra, dec, unit=(u.deg, u.deg), frame='icrs')

        #
        # require that we are on-silicon. for year 1, this roughly means we
        # are in the southern ecliptic hemisphere
        #
        if c_obj.barycentrictrueecliptic.lat > 0 * u.deg:
            print('group{}, {}: found in northern hemisphere. skip!'.format(
                group_id, name))
            continue

        workingdir = os.path.join(
            basedir, 'fits_pkls_results_pngs',
            'field_star_comparison_group{}_name{}'.format(group_id, name))
        if not os.path.exists(workingdir):
            os.mkdir(workingdir)
        workingdir = os.path.join(workingdir, str(source_id))
        if not os.path.exists(workingdir):
            os.mkdir(workingdir)

        outvppath = os.path.join(workingdir,
                                 'verification_page_{}.png'.format(source_id))
        if os.path.exists(outvppath):
            print('found {}, continue'.format(outvppath))
            continue

        #
        # if you already downloaded ffi cutouts for this object, don't get
        # any more. otherwise, get them
        #
        cutouts = glob(os.path.join(workingdir, '*.fits'))
        if len(cutouts) >= 1:
            print('found {} cutouts in {}, skip'.format(
                len(cutouts), workingdir))
        else:
            gfc.get_fficutout(c_obj, cutoutdir=workingdir)

        #
        # given the FFI cutouts, make simple light curves.
        #
        cutouts = glob(os.path.join(workingdir, '*.fits'))
        if len(cutouts) >= 1:
            d = glgf.get_lc_given_fficutout(workingdir,
                                            cutouts,
                                            c_obj,
                                            return_pkl=False)
        else:
            d = np.nan
            print('WRN! did not find fficutout for {}'.format(workingdir))

        if not isinstance(d, dict):
            print('WRN! got bad light curve for {}. skipping.'.format(
                workingdir))
            continue

        outpath = os.path.join(workingdir, 'GLS_rotation_period.results')

        #
        # do Lomb scargle w/ uniformly weighted points.
        #
        ls = LombScargle(d['time'], d['rel_flux'])
        period_min = 0.1
        period_max = np.min(
            [0.9 * (np.max(d['time']) - np.min(d['time'])), 16])
        freq, power = ls.autopower(minimum_frequency=1 / period_max,
                                   maximum_frequency=1 / period_min)
        try:
            _ = power.max()
        except ValueError:
            print('WRN! got bad Lomb-Scargle for {}. skipping.'.format(
                workingdir))
            continue

        ls_fap = ls.false_alarm_probability(power.max(), method='baluev')
        ls_period = 1 / freq[np.argmax(power)]

        d['ls_fap'] = ls_fap
        d['ls_period'] = ls_period

        #
        # try to get TIC Teff. search TIC within 5 arcseconds, then take the
        # Gaia-ID match (removing sources with no Gaia ID, which do exist in
        # TICv8).
        #
        radius = 5.0 * u.arcsecond

        stars = Catalogs.query_region("{} {}".format(float(c_obj.ra.value),
                                                     float(c_obj.dec.value)),
                                      catalog="TIC",
                                      radius=radius)

        nbhr_source_ids = np.array(stars['GAIA'])

        stars = stars[nbhr_source_ids != '']
        nbhr_source_ids = nbhr_source_ids[nbhr_source_ids != '']

        sel = nbhr_source_ids.astype(int) == source_id

        if len(sel[sel]) == 1:
            star = stars[sel]
        else:
            raise NotImplementedError('did not get any TIC match. why?')

        teff = float(star['Teff'])
        if not np.isfinite(teff):  # float(star['Teff']) is NaN when missing
            raise NotImplementedError('got nan TIC teff. what do?')

        #
        # make "check plot" analog for visual inspection
        #
        outd = {
            'ls_fap': d['ls_fap'],
            'ls_period': d['ls_period'],
            'source_id': source_id,
            'ra': ra,
            'dec': dec,
            'name': name,
            'group_id': group_id,
            'teff': teff
        }
        pu.save_status(outpath, 'lomb-scargle', outd)

        vp.generate_verification_page(d, ls, freq, power, cutouts, c_obj,
                                      outvppath, outd)
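The Tmag_pred polynomial above (Gaia G plus a cubic in BP-RP colour) can be factored into a small helper for clarity; the coefficients are exactly those used in main(), but the helper itself is a refactor, not the author's code:

def predict_tmag(g, bp, rp):
    # predicted TESS magnitude from Gaia photometry
    c = bp - rp
    return (g - 0.00522555 * c**3 + 0.0891337 * c**2
            - 0.633923 * c + 0.0324473)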
Example #29
0
def QPO_detect(tf_path,
               output=-1,
               df_path=-1,
               minfreq=0,
               maxfreq=1,
               sinterms=1,
               saveimage=True,
               savetext=False,
               is_window=False,
               norm='standard',
               renorm=False,
               poisson=True,
               FALs=True,
               plottitle="Lomb-Scargle Periodogram",
               plotcolor='black',
               probs=[0.95, 0.5, 0.05]):
    """
    Input: many, many arguments. All but the first are optional. 
         tf_path: path to data file
         output: path to output image (default ~/lomb_scargle.png)
         df_path: path to output text file (default ~/lomb_scargle.txt)
         minfreq: minimum allowed frequency (default 0 Hz)
         maxfreq: maximum allowed frequency (default 1 Hz)
         sinterms: no. of sin terms to be used in Lomb-Scargle fit (default 1)
         saveimage: whether or not to save the plot (default True)
         savetext: whether or not to save results to text file (default False)
         is_window: whether or not to show LS periodogram for the window of 
                    the input data (default False)
         norm: normalization to use (default 'standard')
         renorm: if True, set min. power to 0 and max. to 1 (default False)
         poisson: whether or not to apply correction to Poisson noise (default 
                  True)
         FALs: whether or not to plot false-alarm probability levels (default 
               True, unless poisson=True, in which case they are not)
         plottitle: a title for the plot (default "Lomb-Scargle Periodogram")
         plotcolor: color of plot (default black)
         probs: false alarm probability levels to plot if plotted (defaults
         are 95%, 50%, and 5% chance of data being a result of random 
         Gaussian noise)
         
    Output: a LombScargle object, frequency in the given interval, error on 
    frequency, power in the given interval, and false alarm probability levels
    ** Plots show frequency in mHz, but all frequency outputs are in Hz
    
    Assumes data reduction is complete and that the textfile at tf_path is 
    populated as one row per reduced image and tab-delimited columns: 
    (0) stack, (1) exp avg [ms], (2) exp stdev [ms], (3) time avg [s], 
    (4) time stdev [s], (5) centroid x_min, (6) centroid y_min, (7) pixel area, 
    (8) photon counts, (9) photon count error
    
    If renormalized, every power p obtained from the LS is divided by the 
    maximum power before plotting. 
    
    If poisson is True (default), a decaying exponential is fit to the local 
    minima of the powers obtained from the LS periodogram, which is then used 
    to correct all powers.
    """

    from astropy.stats import LombScargle
    import matplotlib.pyplot as plt
    import numpy as np

    # acquire the data for the Lomb-Scargle (LS) periodogram
    tf = open(tf_path, "r")
    contents = tf.readlines()
    tf.close()
    time = []
    time_err = []
    photon = []
    photon_err = []
    for line in contents:
        data = line.split("\t")
        time.append(float(data[3]))
        time_err.append(float(data[4]))
        photon.append(float(data[8]))
        photon_err.append(float(data[9]))

    # normalization of photon count with first count = 0
    first_count = photon[0]
    photon_normed = [p - first_count for p in photon]

    # normalization of time with t1 = 0
    first_time = time[0]
    time_normed = [t - first_time for t in time]

    # edit the center_data and fit_mean options depending on whether the data
    # is to be viewed in window mode (i.e., if the periodogram is to show the
    # window function that is convolved with the raw data)
    cd_opt = True
    fm_opt = True
    if is_window:
        photon_normed = [1.0] * len(photon_normed)
        photon_err = None
        cd_opt = False
        fm_opt = False

    # if fit_mean is true, then (from LS documentation):
    # "Include a constant offset as part of the model at each frequency.
    # This can lead to more accurate results, especially in the case of
    # incomplete phase coverage."

    # if center_data is True, then (from LS documentation):
    # "Pre-center the data by subtracting the weighted mean of the input data.
    # This is especially important if fit_mean = False"

    # create the LS object using autopower to generate a frequency sweep
    limbo = LombScargle(time_normed,
                        photon_normed,
                        photon_err,
                        center_data=cd_opt,
                        fit_mean=fm_opt,
                        nterms=sinterms)

    # determine the error on the general frequency sweep to use in the
    # restricted frequencies error
    frequency, power = limbo.autopower(normalization=norm)
    frequency_rebin = np.histogram(frequency, len(time))[1].tolist()
    while (len(time) < len(frequency_rebin)):
        frequency_rebin.pop()
    frequency_err = [0]
    for i in range(1, len(frequency_rebin)):  # start at 1 to avoid t=0
        frequency_err.append(frequency_rebin[i] * time_err[i] / time[i])
    frequency_err[0] = frequency_err[1]
    frequency_err = np.histogram(frequency_err, len(frequency))[1].tolist()
    while (len(frequency) < len(frequency_err)):
        frequency_err.pop()

    # set the frequencies and powers using autopower to generate a frequency
    # sweep which is restricted to a given frequency interval
    frequency_strict, power_strict = limbo.autopower(minimum_frequency=minfreq,
                                                     maximum_frequency=maxfreq,
                                                     normalization=norm)

    # determine the error on the restricted frequencies swept
    frequency_strict_rebin = np.histogram(frequency_strict,
                                          len(time))[1].tolist()
    while (len(time) < len(frequency_strict_rebin)):
        frequency_strict_rebin.pop()
    frequency_strict_err = [0]
    for i in range(1, len(frequency_strict_rebin)):  # start at 1 to avoid t=0
        frequency_strict_err.append(frequency_strict_rebin[i] * time_err[i] /
                                    time[i])
    frequency_strict_err[0] = frequency_strict_err[1]  # mirror the fix applied above
    frequency_strict_err = np.histogram(frequency_strict_err,
                                        len(frequency_strict))[1].tolist()
    while (len(frequency_strict) < len(frequency_strict_err)):
        frequency_strict_err.pop()

    # enforce a renormalization of min power = 0, max power = 1 if desired
    if renorm:
        m = max(power)
        power = [p / m for p in power]
        ms = max(power_strict)
        power_strict = [ps / ms for ps in power_strict]

    # create the plot if image is to be saved, with mHz frequency units.
    # if FALs are to be added, calculate those and add to background.
    # if no output location is given, store as home/lomb_scargle.png
    frequency_strict_milli = [f * 1000.0 for f in frequency_strict]
    frequency_strict_milli_err = [f * 1000.0 for f in frequency_strict_err]

    # compute false alarm probabilities
    heights = limbo.false_alarm_level(probs)

    if poisson:  # if we want to apply correction to Poisson noise
        power_strict = correct_poisson(frequency_strict_milli,
                                       power_strict,
                                       fit_lows=True)
        if power_strict is None:  # if the Poisson correction fails and returns None
            print("\nPoisson correction failed, breaking QPO detection.\n")
            return
        print("\nNote: FALs not plotted as Poisson noise is corrected for.\n")
        FALs = False

    if saveimage:  # if the image is to be saved
        if output == -1:
            from os.path import expanduser
            output = expanduser("~")
            output = output + "/lomb_scargle.png"  # path separator was missing
        if not FALs:  # no false-alarm levels to draw
            plt.switch_backend('agg')
            fig, ax0 = plt.subplots(figsize=(6, 2), nrows=1, sharex=False)
        else:
            plt.switch_backend('agg')
            fig, ax0 = plt.subplots(figsize=(6, 2), nrows=1, sharex=False)
            for i in range(len(heights)):
                ax0.axhline(y=heights[i], color='#d3d3d3')

        ax0.plot(frequency_strict_milli, power_strict, color=plotcolor)  #mHz
        ax0.set_title(plottitle)
        ax0.set_ylabel('Power')
        ax0.set_xlabel('Frequency [mHz]')
        plt.savefig(output, bbox_inches='tight')

    # create the text file if data is to be saved.
    # if no output location is given, store as ~/lomb_scargle.txt
    if savetext:  # if we want to save the results to a text file
        if df_path == -1:
            from os.path import expanduser
            df_path = expanduser("~")
            df_path = df_path + "/lomb_scargle.txt"  # path separator was missing
        df = open(df_path, 'w+')
        for i in range(len(frequency)):  #Hz
            if (i < len(frequency_strict)):
                line = str(frequency[i]) + "\t" + str(power[i]) + "\t"
                line += str(frequency_strict[i]) + "\t" + str(
                    power_strict[i]) + "\n"
                df.write(line)
            else:
                line = str(frequency[i]) + "\t" + str(power[i]) + "\t \t \n"
                df.write(line)
        df.close()

    return limbo, frequency_strict, frequency_strict_err, power_strict, heights
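correct_poisson() is referenced above but not shown. A minimal sketch consistent with the docstring (fit a decaying exponential to the local minima of the power spectrum, use it to correct all powers, and return None on failure); the original's exact behaviour is an assumption:

import numpy as np
from scipy.optimize import curve_fit

def correct_poisson(freq, power, fit_lows=True):
    freq = np.asarray(freq, dtype=float)
    power = np.asarray(power, dtype=float)
    if fit_lows:
        # local minima: points lower than both neighbours
        idx = np.where((power[1:-1] < power[:-2]) &
                       (power[1:-1] < power[2:]))[0] + 1
    else:
        idx = np.arange(len(power))

    def decay(f, a, b):
        return a * np.exp(-b * f)

    try:
        popt, _ = curve_fit(decay, freq[idx], power[idx],
                            p0=(power.max(), 1.0))
    except RuntimeError:  # fit did not converge
        return None
    return (power - decay(freq, *popt)).tolist()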
Example #30
0
def lomb_scargle_estimator(x,
                           y,
                           yerr=None,
                           min_period=None,
                           max_period=None,
                           filter_period=None,
                           max_peaks=2,
                           **kwargs):
    """
    Estimate period of a time series using the periodogram

    Args:
        x (ndarray[N]): The times of the observations
        y (ndarray[N]): The observations at times ``x``
        yerr (Optional[ndarray[N]]): The uncertainties on ``y``
        min_period (Optional[float]): The minimum period to consider
        max_period (Optional[float]): The maximum period to consider
        filter_period (Optional[float]): If given, use a high-pass filter to
            down-weight periods longer than this
        max_peaks (Optional[int]): The maximum number of peaks to return
            (default: 2)

    Returns:
        A dictionary with the computed ``periodogram`` and the parameters for
        up to ``max_peaks`` peaks in the periodogram.

    """
    if min_period is not None:
        kwargs["maximum_frequency"] = 1.0 / min_period
    if max_period is not None:
        kwargs["minimum_frequency"] = 1.0 / max_period

    # Estimate the power spectrum
    model = LombScargle(x, y, yerr)
    freq, power = model.autopower(method="fast", normalization="psd", **kwargs)
    power /= len(x)
    power_est = np.array(power)

    # Filter long periods
    if filter_period is not None:
        freq0 = 1.0 / filter_period
        filt = 1.0 / np.sqrt(1 + (freq0 / freq)**(2 * 3))
        power *= filt

    # Find and fit peaks
    peak_inds = (power[1:-1] > power[:-2]) & (power[1:-1] > power[2:])
    peak_inds = np.arange(1, len(power) - 1)[peak_inds]
    peak_inds = peak_inds[np.argsort(power[peak_inds])][::-1]
    peaks = []
    for i in peak_inds[:max_peaks]:
        A = np.vander(freq[i - 1:i + 2], 3)
        w = np.linalg.solve(A, np.log(power[i - 1:i + 2]))
        sigma2 = -0.5 / w[0]
        freq0 = w[1] * sigma2
        peaks.append(
            dict(
                log_power=w[2] + 0.5 * freq0**2 / sigma2,
                period=1.0 / freq0,
                period_uncert=np.sqrt(sigma2 / freq0**4),
            ))

    return dict(
        periodogram=(freq, power_est),
        peaks=peaks,
    )
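Quick usage sketch on a synthetic signal (values are illustrative; the recovered period should land near the injected 7.3 days):

# import numpy as np
# rng = np.random.default_rng(1)
# x = np.sort(rng.uniform(0, 100, 500))
# y = np.sin(2 * np.pi * x / 7.3) + 0.1 * rng.normal(size=500)
# res = lomb_scargle_estimator(x, y, min_period=1.0, max_period=50.0)
# print(res['peaks'][0]['period'])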
Example #31
0
    def rank(self):
        # If it is not a good_df, the rank is 0
        if self.good_df():
            # Standard data reduction procedure
            coloredtup = WDutils.ColoredPoints(self.df)
            redpoints = coloredtup.redpoints
            bluepoints = coloredtup.bluepoints
            droppoints = np.unique(np.concatenate([redpoints, bluepoints]))

            relativetup = WDutils.relativescales(self.df)
            t_mean = relativetup.t_mean
            flux_bgsub = relativetup.flux
            flux_bgsub_err = relativetup.err

            # Store red and blue points
            if len(redpoints) != 0:
                t_mean_red = [t_mean[ii] for ii in redpoints]
                flux_bgsub_red = [flux_bgsub[ii] for ii in redpoints]
                flux_bgsub_err_red = [flux_bgsub_err[ii] for ii in redpoints]
            if len(bluepoints) != 0:
                t_mean_blue = [t_mean[ii] for ii in bluepoints]
                flux_bgsub_blue = [flux_bgsub[ii] for ii in bluepoints]
                flux_bgsub_err_blue = [flux_bgsub_err[ii] for ii in bluepoints]

            df_reduced = self.df.drop(index=droppoints)
            df_reduced = df_reduced.reset_index(drop=True)

            df_reduced = WDutils.df_firstlast(df_reduced)

            relativetup_reduced = WDutils.relativescales(df_reduced)
            t_mean = relativetup_reduced.t_mean
            flux_bgsub = relativetup_reduced.flux
            flux_bgsub_err = relativetup_reduced.err

            # Do FUV matching
            if self.FUVexists():
                exists = True
                fuv_tup = self.FUVmatch()
                t_mean_fuv = fuv_tup.t_mean
                flux_fuv = fuv_tup.flux
                flux_err_fuv = fuv_tup.err
            else:
                exists = False

            # Exposure metric already calculated in init

            # Periodogram Metric
            time_seconds = df_reduced['t_mean'] * 60  # computed but unused below
            ls = LombScargle(t_mean, flux_bgsub)
            freq, amp = ls.autopower(nyquist_factor=1)

            detrad = df_reduced['detrad']
            ls_detrad = LombScargle(t_mean, detrad)
            freq_detrad, amp_detrad = ls_detrad.autopower(nyquist_factor=1)
            pgram_tup = WDranker_2.find_cPGRAM(ls,
                                               amp_detrad,
                                               exposure=self.exposure)
            # Return 0,1 result of recovery
            c_periodogram = pgram_tup.c
            ditherperiod_exists = pgram_tup.ditherperiod_exists

            # Welch Stetson Metric
            if exists:
                c_ws = WDranker_2.find_cWS(t_mean, t_mean_fuv, flux_bgsub,
                                           flux_fuv, flux_bgsub_err,
                                           flux_err_fuv, ditherperiod_exists,
                                           self.FUVexists())
            else:
                c_ws = WDranker_2.find_cWS(t_mean, None, flux_bgsub, None,
                                           flux_bgsub_err,
                                           None, ditherperiod_exists,
                                           self.FUVexists())

            # RMS Metric --- have to 'unscale' the magnitudes
            df_sigma_mag = median_absolute_deviation(df_reduced['mag_bgsub'])
            c_magfit = WDranker_2.find_cRMS(self.mag, df_sigma_mag, 'NUV')

            # Weights:
            w_pgram = 1
            w_expt = .2
            w_WS = .3
            w_magfit = .25

            # Calculate and store rank
            C = ((w_pgram * c_periodogram) + (w_expt * self.c_exposure) +
                 (w_magfit * c_magfit) + (w_WS * c_ws))

        else:
            C = 0

        self.C = C