Example #1
def plot_trajectory(table, sid, season=123, clear=True, fmt="k."):
    """ Takes a source from a table and plots its color-color trajectory.

    Inputs:
      table -- ATpy time-series photometry table.
      sid -- WFCAM source ID.

    Optional inputs:
      season -- which season to select (1, 2, 3, or other = all; default 123).
      clear -- if True, make a new figure when this function is called.
      fmt -- a matplotlib plot format string (default "k.", black dots).

    This is a convenience wrapper: it cuts the table down to one source
    and calls plot_trajectory_core on the result.
    """
    if clear:
        plt.figure()
    ax = plt.gca()

    tcut = data_cut(table, [sid], season)
    jmh = tcut.JMHPNT
    hmk = tcut.HMKPNT
    date = tcut.MEANMJDOBS - 54579  # MJD offset so dates start near zero

    plot_trajectory_core(ax, hmk, jmh, date, fmt=fmt)
    plt.show()
    return
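
A minimal usage sketch; the FITS filename and SOURCEID are placeholders, and the module's helpers (data_cut, plot_trajectory_core) are assumed to be in scope:

import atpy

photometry = atpy.Table('wserv_photometry.fits')  # placeholder filename

# Plot one source's color-color trajectory for season 2 only,
# drawn with red circles instead of the default black dots.
plot_trajectory(photometry, 4419950851405, season=2, fmt='ro')
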
Example #2
def happy_table(big_table, sid_list, flags=-1):
    ''' Convenience wrapper around data_cut with season=123 preset.

    Inputs:
      big_table -- a WFCAM time-series photometry table.
      sid_list -- a list or array of SOURCEIDs to cut along.

    Optional inputs:
      flags -- maximum ppErrBit quality flags to use (default -1).
      '''

    return data_cut(big_table, sid_list, season=123, flags=flags)
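
A short sketch of the intended call pattern; the SOURCEIDs are placeholders and photometry is an ATpy table loaded as in the previous sketch:

# Cut the full photometry table down to a handful of sources, all seasons.
target_sids = [4419950851405, 4419950851406, 4419950851407]
targets = happy_table(photometry, target_sids)
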
Example #3
def arraystat_2(table, sid, season=0, rob=True, per=True, flags=0):
    """ Calculates a variety of statistical parameters for a given star.

    Inputs:
      table -- an ATpy table with time-series photometry
      sid -- a WFCAM source ID.
      
    Optional inputs:
      season -- which season to select (1,2,3, or other=All)
      rob -- also use Robust statistics? (takes longer, default True)
      per -- run period-finding? (takes longer, default True)
      flags -- Maximum ppErrBit quality flags to use (default 0)

    Returns:
      ret -- a data structure containing the computed values.
      """
    
    s_table = data_cut( table, [sid], season=season, flags=flags )

    if len(s_table) < 1:
        print "no data for %d!" % sid
        return None
    
    jcol = s_table.JAPERMAG3; jerr = s_table.JAPERMAG3ERR
    hcol = s_table.HAPERMAG3; herr = s_table.HAPERMAG3ERR
    kcol = s_table.KAPERMAG3; kerr = s_table.KAPERMAG3ERR
    jmhcol=s_table.JMHPNT   ; jmherr = s_table.JMHPNTERR
    hmkcol=s_table.HMKPNT   ; hmkerr = s_table.HMKPNTERR
    racol= s_table.RA
    decol= s_table.DEC

    date = s_table.MEANMJDOBS 

    messy_table = data_cut( table, [sid], season=-1 )
    jppcol=messy_table.JPPERRBITS
    hppcol=messy_table.HPPERRBITS
    kppcol=messy_table.KPPERRBITS

    # Make an empty data structure, assign it information as attributes, and
    # return the object itself; then there's no more worrying about indices.
    class Empty():
        pass

    ret = Empty()
    
    ret.N = len(s_table)
    ret.RA = racol.mean()
    ret.DEC = decol.mean()
    
    ret.chip = get_chip(date[0], np.degrees(racol[0]), np.degrees(decol[0]))
    if ret.N > 4:
        ret.one_chip = ( get_chip(date[0], racol[0], decol[0]) ==
                         get_chip(date[1], racol[1], decol[1]) ==
                         get_chip(date[2], racol[2], decol[2]) ==
                         get_chip(date[3], racol[3], decol[3]) )
    else:
        ret.one_chip = True
    
    # Stetson variability index computed jointly across the J, H, and K bands.
    ret.Stetson = stetson.S(jcol, jerr, hcol, herr, kcol, kerr)
    
    ret.j = Empty();   ret.j.data = jcol;   ret.j.err = jerr
    ret.h = Empty();   ret.h.data = hcol;   ret.h.err = herr
    ret.k = Empty();   ret.k.data = kcol;   ret.k.err = kerr
    ret.jmh = Empty(); ret.jmh.data=jmhcol; ret.jmh.err = jmherr
    ret.hmk = Empty(); ret.hmk.data=hmkcol; ret.hmk.err = hmkerr

    bands = [ ret.j, ret.h, ret.k, ret.jmh, ret.hmk ]

    for b in bands:
        # use b.data, b.err
        
        b.rchi2 = reduced_chisq( b.data, b.err )

        b.mean = b.data.mean()
        b.rms = b.data.std()
        b.min = b.data.min()
        b.max = b.data.max()
        b.peak_trough = b.max - b.min

        b.mean_err = b.err.mean()

        # Robust quantifiers simply have an "r" at the end of their names
        if rob:
            b.datar = rb.removeoutliers(b.data, 3, niter=2)
            
            b.meanr = rb.meanr(b.data)
            b.rmsr = rb.stdr(b.data)
            b.minr = b.datar.min()
            b.maxr = b.datar.max()
            b.peak_troughr = b.maxr - b.minr

        # Period finding... is a little dodgy still, and might take forever
        if per:
            
            b.lsp = lsp(date, b.data, 6., 6.) # apologies if this is cluttered
            Jmax = lsp_mask(b.lsp[0], b.lsp[1])
            b.lsp_per = 1./ b.lsp[0][Jmax]
            b.lsp_pow = b.lsp[1][Jmax]
            b.fx2_per = 1./ test_analyze( date, b.data, b.err )

    # Finally, the color slope on the (J-H) vs (H-K) diagram.
    # (Until the proper fitting is done, use the naive peak-to-trough ratio.)
    ret.color_slope = (ret.jmh.peak_trough / ret.hmk.peak_trough)

    # and the pp_max, using the messy table
    ret.jpp_max = jppcol.max()
    ret.hpp_max = hppcol.max()
    ret.kpp_max = kppcol.max()

    return ret
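
A sketch of how the returned object might be inspected; the filename and SOURCEID are placeholders, and period-finding is skipped here for speed:

import atpy

photometry = atpy.Table('wserv_photometry.fits')  # placeholder filename

stats = arraystat_2(photometry, 4419950851405, season=1, per=False)
if stats is not None:
    print "N = %d, Stetson index = %.2f" % (stats.N, stats.Stetson)
    print "J band: mean %.3f, rms %.3f, robust rms %.3f" % \
        (stats.j.mean, stats.j.rms, stats.j.rmsr)
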
Example #4
def make_corrections_table(constants, table):
    ''' Creates a table of photometric corrections per chip per night.

    Inputs:
      constants -- an ATpy table which gives 10 constant stars per chip.
                   Columns: "SOURCEID" (13-digit int), "chip" (1-16 int)
      table -- an ATpy table with time-series photometry

    Returns:
      an ATpy table ("The Corrections Table") with one row per night per chip:

      date             chip   j_correction  h_correction  k_correction
      54582.6251067    3      +0.13         +0.07         -0.03

    '''
    # rb.meanr(x) is the robust mean

    # First - let's compute every constant star's robust mean in each band.
    # And keep track of them.

    j_meanr = np.zeros(constants.SOURCEID.size)
    h_meanr = np.zeros(constants.SOURCEID.size)
    k_meanr = np.zeros(constants.SOURCEID.size)

    for i, sid in enumerate(constants.SOURCEID):
        stable = season_cut(table, sid, 123, flags=0)

        j_meanr[i] = rb.meanr(stable.JAPERMAG3)
        h_meanr[i] = rb.meanr(stable.HAPERMAG3)
        k_meanr[i] = rb.meanr(stable.KAPERMAG3)
        del stable
    
    try:
        constants.add_column('j_meanr', j_meanr)
        constants.add_column('h_meanr', h_meanr)
        constants.add_column('k_meanr', k_meanr)

        print "computed robust-mean magnitude for each constant star"
    except Exception:
        print "looks like you already computed robust-mean magnitudes"
        
    # Second - Calculate mean(r) deviations for each chip for each night
    chip_list = list( set( constants.chip ) )
    
    corrections_list = [] # add tables to this list, join them up at the end

    for chip in chip_list:
        
        local_network = constants.where(constants.chip == chip)
        
        # Take all of this chip's constant stars and, night by night,
        # compute each star's deviation from its robust mean.

        # Grab a slice of the big table corresponding only to these
        # sources' photometry.

        cids = local_network.SOURCEID

        local_table = data_cut(table, cids, 123, flags=0)
        # (data_cut replaces a long manual chain of
        #  (table.SOURCEID == cids[n]) | ... comparisons plus ppErrBits cuts.)

        # Now compute each night's robust-mean (meanr) deviation.
        # First, collect the unique dates to iterate over.
        date_list = list( set( local_table.MEANMJDOBS ) )
        
        # Arrays to hold this chip's per-night corrections.

        ld = len(date_list)
        
        date_arr = np.zeros(ld)
        j_correction = np.zeros(ld)
        h_correction = np.zeros(ld)
        k_correction = np.zeros(ld)
        chip_arr = chip * np.ones(ld, dtype=int)

        # get each night's correction!
        for j, date in enumerate(date_list):

            # a temporary place to keep the individual deviations
            j_deviation = np.zeros(cids.size)
            h_deviation = np.zeros(cids.size)
            k_deviation = np.zeros(cids.size)

            for i, star in enumerate(cids):
                star_night_row = local_table.where( 
                    (local_table.SOURCEID == star) &
                    (local_table.MEANMJDOBS == date) )

                # deviation: the meanr minus that night's magnitude.
                j_deviation[i] = (constants.j_meanr[constants.SOURCEID==star]- 
                                  star_night_row.JAPERMAG3 )
                h_deviation[i] = (constants.h_meanr[constants.SOURCEID==star]- 
                                  star_night_row.HAPERMAG3 )
                k_deviation[i] = (constants.k_meanr[constants.SOURCEID==star]- 
                                  star_night_row.KAPERMAG3 )

            date_arr[j] = date
            j_correction[j] = -rb.meanr(j_deviation)
            h_correction[j] = -rb.meanr(h_deviation)
            k_correction[j] = -rb.meanr(k_deviation)


        # Make a subtable for this chip and add it to corrections_list;
        # the subtables get joined together at the end.

        correction_subtable = atpy.Table(name="The Corrections Table")
        # add_column( 'name', data )
        correction_subtable.add_column('date', date_arr)
        correction_subtable.add_column('chip', chip_arr)
        correction_subtable.add_column('j_correction', j_correction)
        correction_subtable.add_column('h_correction', h_correction)
        correction_subtable.add_column('k_correction', k_correction)

        corrections_list.append( correction_subtable )

    # Join the per-chip subtables into a single corrections table.

    correction_table = corrections_list[0]
    for subtable in corrections_list[1:]:
        correction_table.append(subtable)

    return correction_table
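
A sketch of how this table might be generated and saved; the filenames are placeholders:

import atpy

constants = atpy.Table('constant_stars.fits')      # placeholder filename
photometry = atpy.Table('wserv_photometry.fits')   # placeholder filename

corrections = make_corrections_table(constants, photometry)
corrections.write('corrections_table.fits', overwrite=True)
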
