Example 1
    def __init__(self, table, sid, date_offset=0, name=None, abridger=None):

        self.sid = sid
        self.date_offset = date_offset

        # Loading data
        self.s_table = data_cut (table, sid)

        if len(self.s_table) == 0:
            raise ValueError("no data here")

        self.min_date = self.s_table.MEANMJDOBS.min() - date_offset
        self.max_date = self.s_table.MEANMJDOBS.max() - date_offset

        self.abridger = abridger

        if name is not None:
            self.name = name
        else:
            self.name = ''
Example 2
def S_sid(table, sid, season=0, flags=256):
    """ 
    Calculates the Stetson J index for a given source. 

    Parameters
    ----------
    table : atpy.Table 
        Table with time-series photometry and grades
    sid : int 
        a Source ID from WFCAM (13 digits)
    season : int, optional
        Which observing season of our dataset (1, 2, 3, or all).
    flags : int, optional
        Minimum data flag cutoff for ppErrBits.

    Returns
    -------
    S : float
        The Stetson variability index "J" over the star's 3 input bands.

    """
    from helpers3 import data_cut

    s_table = data_cut(table, sid, season, flags=flags)
    jcol = s_table.JAPERMAG3
    hcol = s_table.HAPERMAG3
    kcol = s_table.KAPERMAG3
    jerr = s_table.JAPERMAG3ERR
    herr = s_table.HAPERMAG3ERR
    kerr = s_table.KAPERMAG3ERR
    jgrade = s_table.JGRADE
    hgrade = s_table.HGRADE
    kgrade = s_table.KGRADE


    return S(jcol, jerr, jgrade, hcol, herr, hgrade, kcol, kerr, kgrade)
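
A minimal usage sketch for S_sid, with hypothetical variable names (`wserv_table` and `source_id` are placeholders, not names from the original code); it assumes the Stetson helper S() called above is defined in the same module:

    # Hypothetical call: compute the Stetson J index for one source.
    stetson_j = S_sid(wserv_table, source_id, season=0, flags=256)
    print "Stetson J for source %d: %.3f" % (source_id, stetson_j)
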
Example 3
def star_slope( table, sid, xband='hmk', yband='k', flags=0, 
                verbose=True, null=np.double(-9.99999488e+08)):
    """
    Calculates the color slope, given an input table and ID.

    Parameters
    ----------
    table : atpy.Table
        Table with time-series photometry
    sid : int
        13-digit WFCAM source ID of star to plot
    xband : {'jmh', 'hmk'}, optional
        The x-axis array to use for the slope. Default 'hmk'.
    yband : {'j', 'jmh', 'k'}, optional
        The y-axis array to use for the slope. Default 'k'.
    flags : int, optional
        Maximum ppErrBit quality flags to use (default 0).
    verbose : bool, optional
        Whether to print verbose output. Default True.
   
    Returns
    -------
    slope : float
        Slope (in rise/run) of the linear fit.
    intercept : float
        Y-value where the linear fit intercepts the Y-axis.
    slope_error : float
        The standard error on the fitted slope: an indication of fit quality.

    """

    if (xband not in ['jmh', 'hmk']) or (yband not in ['j', 'jmh', 'k']):
        raise ValueError("Incorrect argument to `xband` or `yband`")

    # Map each band keyword to its column name in the table
    band_dict = {'j':'JAPERMAG3', 'k':'KAPERMAG3', 
                 'jmh':'JMHPNT', 'hmk':'HMKPNT'}


    # Loading data
    s_table = data_cut (table, sid, season=0)

    if len(s_table) == 0:
        print "no data here"
        return


    j_table = band_cut( s_table, 'j', max_flag=flags)
    h_table = band_cut( s_table, 'h', max_flag=flags)
    # k_table = band_cut( s_table, 'k', max_flag=flags)

    jh_table = band_cut( j_table, 'h', max_flag=flags)
    hk_table = band_cut( h_table, 'k', max_flag=flags)
    # jk_table = band_cut( j_table, 'k', max_flag=flags)

    jhk_table = band_cut( jh_table, 'k', max_flag=flags)
    

    if (xband, yband) == ('hmk', 'k'):
        data = hk_table
    elif (xband, yband) == ('jmh', 'j'):
        data = jh_table
    elif (xband, yband) == ('hmk', 'jmh'):
        data = jhk_table
    else:
        data = jhk_table
        print "Incorrect combination of `xband`, `yband`."

    x_array = data.data[band_dict[xband]]
    xerr_array = data.data[band_dict[xband]+"ERR"]
    y_array = data.data[band_dict[yband]]
    yerr_array = data.data[band_dict[yband]+"ERR"]
    
    return slope(x_array, y_array, xerr_array, yerr_array, verbose)
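
A minimal usage sketch for star_slope, with hypothetical variable names (`wserv_table` and `source_id` are placeholders); it assumes data_cut, band_cut, and slope are importable from the surrounding module. The function returns None (after printing a warning) when the source has no data, so the result is guarded:

    # Hypothetical call: K vs. H-K color slope for one source.
    result = star_slope(wserv_table, source_id, xband='hmk', yband='k', flags=0)
    if result is not None:
        khk_slope, khk_intercept, khk_slope_err = result
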
Example 4
def statcruncher(table, sid, season=0, rob=True, per=True,
                 graded=False, colorslope=False, flags=0):
    """ 
    Calculates several statistical properties for a given star.

    Will work with "lonely" datapoints (i.e. not all JHK mags are 
    well-defined). Optionally works with graded data, too!

    Parameters
    ----------
    table : atpy.Table
        Table with time-series photometry
    sid : int
        13-digit WFCAM source ID of star to plot
    season : int, optional
        Which observing season of our dataset (1, 2, 3, or all).
        Any value that is not the integers (1, 2, or 3) will be 
        treated as "no season", and no time-cut will be made.
        Note that this is the default behavior.
    rob : bool, optional 
        Use robust statistics, in addition to normal ones?
        (takes longer, default True)
    per : bool, optional 
        Run period-finding? Uses fast chi-squared and lomb-scargle.
        (takes longer, default True)
    graded : bool, optional
        Also calculate Stetson indices using quality grades as weights?
        Uses stetson_graded; requires that the data has been graded by
        night_cleanser.null_cleanser_grader().
    colorslope : bool, optional
        Calculate color slopes? Runs them over (JvJ-H, KvH-K, J-HvH-K).
        Make sure your data has been color-error-corrected! Default False.
    flags : int, optional 
        Maximum ppErrBit quality flags to use (default 0)

    Returns
    -------
    ret : data structure
        Contains the computed values.
        They can be accessed as attributes
        (e.g., "ret.j.mean" or "ret.Stetson").

    """
    
    s_table = data_cut ( table, sid, season=season)

    if len(s_table) < 1:
        print "no data for %d!" % sid
        return None

    # First, let's compute single-band statistics. This will require
    # separate data_cuts on each band.

    full_jtable = band_cut(s_table, 'j')
    full_htable = band_cut(s_table, 'h')
    full_ktable = band_cut(s_table, 'k')

    j_table = band_cut(s_table, 'j', max_flag=flags)
    h_table = band_cut(s_table, 'h', max_flag=flags)
    k_table = band_cut(s_table, 'k', max_flag=flags)

    jmh_table = band_cut(j_table, 'h', max_flag=flags)
    hmk_table = band_cut(h_table, 'k', max_flag=flags)
    
    # jhk_table used only for colorslope
    jhk_table = band_cut( jmh_table, 'k', max_flag=flags)

    # get a date (x-axis) for each 
    jdate = j_table.MEANMJDOBS
    hdate = h_table.MEANMJDOBS
    kdate = k_table.MEANMJDOBS
    jmhdate = jmh_table.MEANMJDOBS
    hmkdate = hmk_table.MEANMJDOBS
#    date = s_table.MEANMJDOBS 
    
    # get a magnitude and magnitude error for each band
    jcol = j_table.JAPERMAG3; jerr = j_table.JAPERMAG3ERR
    hcol = h_table.HAPERMAG3; herr = h_table.HAPERMAG3ERR
    kcol = k_table.KAPERMAG3; kerr = k_table.KAPERMAG3ERR
    jmhcol= jmh_table.JMHPNT; jmherr = jmh_table.JMHPNTERR
    hmkcol= hmk_table.HMKPNT; hmkerr = hmk_table.HMKPNTERR

    # get the RA and DEC columns, checking for sensible values
    racol= s_table.RA[(s_table.RA > 0) & (s_table.RA < 7)]
    decol= s_table.DEC[(s_table.DEC > -4) & (s_table.DEC < 4)]

    # Now let's get some ability to track errorful data.
    # messy_table_j = band_cut( s_table, 'j')
    # messy_table_h = band_cut( s_table, 'h')
    # messy_table_k = band_cut( s_table, 'k')
    # jppcol = messy_table_j.JPPERRBITS
    # hppcol = messy_table_h.HPPERRBITS
    # kppcol = messy_table_k.KPPERRBITS

    # make an empty data structure and just assign it information, then return 
    # the object itself! then there's no more worrying about indices.
    class Empty():
        pass

    ret = Empty()
    
    # How many nights have observations in each band?
    ret.N_j = len(j_table)
    ret.N_h = len(h_table)
    ret.N_k = len(k_table)

    # What's the distribution of flags and nights?
    js = full_jtable.JPPERRBITS
    hs = full_htable.HPPERRBITS
    ks = full_ktable.KPPERRBITS

    ret.N_j_noflag = len(js[js == 0])
    ret.N_h_noflag = len(hs[hs == 0])
    ret.N_k_noflag = len(ks[ks == 0])

    ret.N_j_info = len(js[(js < 256) & (js > 0)])
    ret.N_h_info = len(hs[(hs < 256) & (hs > 0)])
    ret.N_k_info = len(ks[(ks < 256) & (ks > 0)])

    ret.N_j_warn = len(js[ js >= 256 ])
    ret.N_h_warn = len(hs[ hs >= 256 ])
    ret.N_k_warn = len(ks[ ks >= 256 ])


    # Mean position of this source
    ret.RA = racol.mean()
    ret.DEC = decol.mean()
    
    # Calculate the Stetson index...
    S, choice, stetson_nights = Stetson_machine (s_table, flags)
    
    ret.Stetson = S
    ret.Stetson_choice = choice
    ret.Stetson_N = stetson_nights

    if graded:
        # Calculate the graded Stetson index...
        g_S, g_choice, g_stetson_nights = (
            graded_Stetson_machine (s_table, flags) )
    
        ret.graded_Stetson = g_S
        ret.graded_Stetson_choice = g_choice
        ret.graded_Stetson_N = g_stetson_nights


    # Calculate PSTAR parameters
    ret.pstar_mean = s_table.PSTAR.mean()
    ret.pstar_median = np.median(s_table.PSTAR)
    ret.pstar_rms = s_table.PSTAR.std()

    # Create parallel data structures for each band, so we can iterate
    ret.j = Empty(); ret.j.data = jcol; ret.j.err = jerr; ret.j.date = jdate   
    ret.h = Empty(); ret.h.data = hcol; ret.h.err = herr; ret.h.date = hdate
    ret.k = Empty(); ret.k.data = kcol; ret.k.err = kerr; ret.k.date = kdate
    ret.jmh = Empty(); ret.jmh.data=jmhcol; ret.jmh.err = jmherr 
    ret.hmk = Empty(); ret.hmk.data=hmkcol; ret.hmk.err = hmkerr
    ret.jmh.date = jmhdate; ret.hmk.date = hmkdate

    ret.j.N = ret.N_j ; ret.h.N = ret.N_h ; ret.k.N = ret.N_k
    ret.jmh.N = len(jmh_table) ; ret.hmk.N = len(hmk_table)

    bands = [ ret.j, ret.h, ret.k, ret.jmh, ret.hmk ]

    for b in bands:
        # use b.data, b.err
        
        # if this band is empty, don't try to do the following assignments
        if b.N == 0: continue

        b.rchi2 = reduced_chisq( b.data, b.err )

        b.mean = b.data.mean()
        b.median = np.median(b.data) # dao
        b.rms = b.data.std()
        b.min = b.data.min()
        b.max = b.data.max()
        b.range = b.max - b.min

        b.err_mean = b.err.mean() #dao
        b.err_median = np.median(b.err) #dao
        b.err_rms = b.err.std() #dao
        b.err_min = b.err.min() #dao
        b.err_max = b.err.max() #dao
        b.err_range = b.err_max - b.err_min #dao


        # Robust quantifiers simply have an "r" at the end of their names
        if rob:
            b.datar, b.indr = rb.removeoutliers(b.data, 3, niter=2, retind=True)
            b.errr = b.err[b.indr]
            
            b.meanr = rb.meanr(b.data)
            b.medianr = rb.medianr(b.data) # dao
            b.rmsr = rb.stdr(b.data)
            b.minr = b.datar.min()
            b.maxr = b.datar.max()
            b.ranger = b.maxr - b.minr

            b.err_meanr = b.errr.mean() # dao
            b.err_medianr = np.median(b.errr) #dao
            b.err_rmsr = b.errr.std() #dao
            b.err_minr = b.errr.min() #dao
            b.err_maxr = b.errr.max() #dao
            b.err_ranger = b.err_maxr - b.err_minr #dao

        # Period finding... is a little dodgy still, and might take forever
        if per and b.N > 2:

            hifac = lsp_tuning(b.date)
            
            b.lsp = lsp(b.date, b.data, 6., hifac) 
            Jmax = lsp_mask(b.lsp[0], b.lsp[1])
            b.lsp_per = 1./ b.lsp[0][Jmax]
            b.lsp_pow = b.lsp[1][Jmax]
            b.lsp_sig = getSignificance(b.lsp[0], b.lsp[1], b.lsp[2], 6.)[Jmax]

            best_freq, chimin = test_analyze( b.date, b.data, b.err, 
                                              ret_chimin=True )

            b.fx2_per, b.fx2_chimin = 1./best_freq, chimin
            

    if colorslope:
        # J vs J-H : use jmh_table exclusively
        (ret.jjh_slope, a, ret.jjh_slope_err) = (
            slope( jmh_table.JMHPNT, jmh_table.JAPERMAG3, 
                   jmh_table.JMHPNTERR, jmh_table.JAPERMAG3ERR, 
                   verbose=False) )
        # K vs H-K : use hmk_table exclusively
        (ret.khk_slope, a, ret.khk_slope_err) = (
            slope( hmk_table.HMKPNT, hmk_table.KAPERMAG3, 
                   hmk_table.HMKPNTERR, hmk_table.KAPERMAG3ERR,
                   verbose=False) )
        # J-H vs H-K : use jhk_table exclusively
        (ret.jhk_slope, a, ret.jhk_slope_err) = (
            slope( jhk_table.HMKPNT, jhk_table.JMHPNT, 
                   jhk_table.HMKPNTERR, jhk_table.JMHPNTERR,
                   verbose=False) )
        
    # and the pp_max, using the messy table
    # (slated for a re-implementation)
    # ret.jpp_max = jppcol.max()
    # ret.hpp_max = hppcol.max()
    # ret.kpp_max = kppcol.max()

    return ret
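
A minimal usage sketch for statcruncher, with hypothetical variable names (`wserv_table` and `source_id` are placeholders); it assumes the helpers it relies on (data_cut, band_cut, Stetson_machine, the robust-statistics module rb, and the period-finding routines) are importable. Period finding is switched off here since the code notes it can take a long time:

    # Hypothetical call: summary statistics for one source, no period search.
    ret = statcruncher(wserv_table, source_id, season=0, rob=True, per=False)
    if ret is not None:
        print "Stetson J: %.3f (over %d nights)" % (ret.Stetson, ret.Stetson_N)
        # Per-band stats (e.g. ret.j.mean) are assigned only when that band has data.
        if ret.j.N > 0:
            print "mean J = %.3f, rms = %.3f" % (ret.j.mean, ret.j.rms)
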