Example #1
def make_map(start, stop):
    # Build the '%0.3d' band placeholder outside strftime so the '%' escape
    # survives; it is filled in with the band number below.
    outname = start.strftime('%Y%m%d_%H%M') + '_%0.3dghz.h5'
    if os.path.exists(os.path.join(OUTPUT_DIR, outname % 150)):
        # Don't repeat work: skip intervals that already have a map on disk.
        return
    data = SPTDataReader(start_date=start, stop_date=stop, quiet=True)
    data.readData(start,
                  stop,
                  correct_global_pointing=False,
                  downsample_bolodata=4)
    # Point-source config; unused in this example (Example #7 passes it to the
    # mapmaker as 'pointsource_file').
    ptsrc_file = '/home/ndhuang/spt_code/sptpol_software/config_files/ptsrc_config_ra23h30dec-5520101118_203532.txt'
    map_args = {
        'good_bolos': [
            'optical_bolometers', 'no_c4', 'bolometer_flags', 'has_pointing',
            'has_polcal', 'timestream_flags', 'elnod', 'calibrator',
            'good_timestream_weight', 'full_pixel'
        ],
        'reso_arcmin': 1,
        'proj': 1,
        'map_shape': (25, 70),
        'timestream_filtering': {
            'poly_order': 1,
            'ell_lowpass': 6600
        },
        'use_c_pointing': False
    }
    print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
    print 'Starting mapping'
    _map = quickmap(data, **map_args)
    for band in _map:
        _map[band].writeToHDF5(os.path.join(OUTPUT_DIR, outname % int(band)),
                               overwrite=True)
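A hypothetical driver for make_map, sketched from the call patterns in these examples; the interval strings are placeholders in the archive timestamp format used in Example #10:

from sptpol_software.util.time import SptDatetime

# Hypothetical list of (start, stop) archive timestamps to map.
intervals = [('140525 05:01:19', '140525 07:40:23')]
for t0, t1 in intervals:
    make_map(SptDatetime(t0), SptDatetime(t1))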
Example #2
def loadDataForAuxFile(aux):
    '''Read the full data for the interval covered by an aux file (path or object).'''
    if isinstance(aux, str):
        aux = files.read(aux)
    data = SPTDataReader(aux.header.start_date,
                         aux.header.stop_date,
                         quiet=True)
    data.readData()
    return data
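A one-line usage sketch, with a hypothetical aux-file path (the function also accepts an already-read file object):

data = loadDataForAuxFile('/path/to/aux_file.h5')  # hypothetical path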
def readInterval(interval):
    '''Just read some data'''
    data = SPTDataReader(interval[0], interval[1], quiet=True)
    data.readData(
        interval[0],
        interval[1],
        # options we may want to use
        correct_global_pointing=False,
        standardize_samplerates=False)
    return data
def getTimes(dqdir='/home/sptdat/public_html/data_quality/fridge_cycles'):
    subds = sorted(os.listdir(dqdir))[-12:]
    for s in subds:
        f = open(os.path.join(dqdir, s, 'cycle_stats.txt'))
        stats = f.readlines()[1].split()
        f.close()
        start = stats[0]
        end = stats[1]
        data = SPTDataReader(start,
                             end,
                             master_configfile='sptpol_stripped_master_config')
        data.readData(correct_global_pointing=False, process_psds=False)
        mkPlot(data, start)
Example #5
def readInterval(interval, data=None):
    start = interval[0]
    stop = interval[1]
    if data is None:
        data = SPTDataReader(start, stop,
                             master_configfile='sptpol_stripped_master_config',
                             quiet=True)
    data.readData(start, stop, correct_global_pointing=False,
                  standardize_samplerates=False, unlimited_time=True,
                  zero_processing=True)
    return data
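Because readInterval both accepts and returns the reader, a caller can construct SPTDataReader once and reuse it across intervals; a minimal sketch, assuming a hypothetical list of (start, stop) pairs:

data = None
for interval in intervals:  # hypothetical list of (start, stop) pairs
    data = readInterval(interval, data=data)  # reader is built once, reused after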
Example #6
def readInterval(interval):
    '''Just read some data'''
    # 'source' is assumed to be defined at module scope in the original script.
    interval = logs.readSourceScanTimes(interval[0],
                                        interval[1],
                                        source,
                                        nscans_min=50)[0]
    data = SPTDataReader(interval[0],
                         interval[1],
                         quiet=True,
                         config_file="sptpol_stripped_master_config")
    data.readData(interval[0],
                  interval[1],
                  correct_global_pointing=False,
                  standardize_samplerates=False)
    return data
Example #7
def make_map(interval):
    start = interval[0]
    stop = interval[1]
    # Build the '%0.3d' band placeholder outside strftime so the '%' escape survives.
    outname = start.strftime('%Y%m%d_%H%M') + '_%0.3dghz.h5'
    if os.path.exists(os.path.join(OUTPUT_DIR, outname % 150)):
        return None
    data = SPTDataReader(start_date=start, stop_date=stop, quiet=True)
    data.readData(start,
                  stop,
                  correct_global_pointing=False,
                  downsample_bolodata=4,
                  timestream_units='k_cmb')
    ptsrc_file = '/home/ndhuang/spt_code/sptpol_software/config_files/ptsrc_config_ra23h30dec-5520101118_203532.txt'
    map_args = {
        'good_bolos': [
            'optical_bolometers', 'no_c4', 'bolometer_flags', 'has_pointing',
            'has_polcal', 'timestream_flags', 'elnod', 'calibrator',
            'good_timestream_weight', 'full_pixel'
        ],
        'reso_arcmin': 10,
        'proj': 5,
        'map_shape': (25, 45),
        'timestream_filtering': {
            'poly_order': 1,
            'ell_lowpass': 6600,
            'masked_highpass': 0.1,
            'dynamic_source_mask': False,
            'pointsource_file': ptsrc_file
        }
    }
    print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
    print 'Starting mapping'
    _map = quickmap(data, **map_args)
    for band in _map:
        _map[band].writeToHDF5(os.path.join(OUTPUT_DIR, outname % int(band)),
                               overwrite=True)
    return None
Example #8
def make_idf(interval, ok_time):
    start = interval[0]
    stop = interval[1]
    # if os.path.exists(os.path.join(OUTPUT_DIR, 'left', outname %150)):
    #     return None
    # try:
    #     os.makedirs(os.path.join(OUTPUT_DIR, 'left'))
    #     os.makedirs(os.path.join(OUTPUT_DIR, 'right'))
    # except OSError:
    #     pass
    inter_time = start + ok_time
    idf_num = 0
    # The per-chunk reads below are commented out; as written, the loop only
    # counts chunks, and IDFs are written for the final partial chunk only.
    while inter_time < stop:
        # data = SPTDataReader(inter_time - ok_time, inter_time, verbose = True)
        # data.readData(correct_global_pointing = False, downsample_bolodata = 8, timestream_units = 'power')
        # idf150 = MapIDF(data, freq = 150)
        # idf150.writeToHDF5(os.path.join(OUTPUT_DIR, 'idf_150ghz_{:02d}.h5'.format(idf_num)),
        #                    as_stub = False, overwrite = True)
        # idf090 = MapIDF(data, freq = 90)
        # idf090.writeToHDF5(os.path.join(OUTPUT_DIR, 'idf_090ghz_{:02d}.h5'.format(idf_num)),
        #                    as_stub = False, overwrite = True)
        idf_num += 1
        inter_time += ok_time
    data = SPTDataReader(inter_time - ok_time, stop, verbose=True)
    data.readData(correct_global_pointing=False,
                  downsample_bolodata=8,
                  timestream_units='power')
    idf150 = MapIDF(data, freq=150)
    idf150.writeToHDF5(os.path.join(OUTPUT_DIR,
                                    'idf_150ghz_{:02d}.h5'.format(idf_num)),
                       as_stub=False,
                       overwrite=True)
    idf090 = MapIDF(data, freq=90)
    idf090.writeToHDF5(os.path.join(OUTPUT_DIR,
                                    'idf_090ghz_{:02d}.h5'.format(idf_num)),
                       as_stub=False,
                       overwrite=True)
    return
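A hypothetical invocation of make_idf; ok_time must support datetime arithmetic (start + ok_time), so a datetime.timedelta is assumed, and the timestamp format follows Example #12:

from datetime import timedelta
from sptpol_software.util.time import SptDatetime

interval = (SptDatetime('150203 06:00:00'),
            SptDatetime('150203 12:00:00'))  # hypothetical interval
make_idf(interval, timedelta(hours=2))  # chunk the interval into 2-hour pieces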
Example #9
def make_idf_pieces(start, stop):
    intervals = [[start, '03-Feb-2015:06:21:41'],
                 ['03-Feb-2015:06:21:48', '03-Feb-2015:08:20:37'],
                 ['03-Feb-2015:08:20:44', '03-Feb-2015:10:20:44'],
                 ['03-Feb-2015:10:20:53', stop]]
    idf_num = 0
    for start, stop in intervals:
        data = SPTDataReader(start, stop, verbose=True)
        data.readData(correct_global_pointing=False,
                      downsample_bolodata=8,
                      timestream_units='power',
                      verbose=True)
        idf150 = MapIDF(data, freq=150)
        idf150.writeToHDF5(
            os.path.join(OUTPUT_DIR, 'idf_150ghz_{:02d}.h5'.format(idf_num)),
            as_stub=False, overwrite=True)
        idf090 = MapIDF(data, freq=90)
        idf090.writeToHDF5(
            os.path.join(OUTPUT_DIR, 'idf_090ghz_{:02d}.h5'.format(idf_num)),
            as_stub=False, overwrite=True)
        idf_num += 1
    return
Example #10
    #          ['140328 10:26:20', '140328 12:57:40'], # dither 12
    #          ['140329 22:28:40', '140330 01:02:44'], # dither 18
    #          ['140331 12:59:38', '140331 15:32:02'], # dither 0
    #          ['140403 07:49:00',
    #           SptDatetime.now().strftime('%y%m%d %H:%M:%S')]]
    times = [
        ['140525 05:01:19', '140525 07:40:23'],  # dither 0
        ['140525 14:16:48', '140525 16:57:01'],  # dither 9
    ]
    OUTPUT_DIR = '/data/ndhuang/fast_500d_map/run1/proj1'
    datas = []
    for t in times:
        realtimes = logs.readSourceScanTimes(t[0],
                                             t[1],
                                             'ra0hdec-57.5',
                                             nscans_min=0)
        for rt in realtimes:
            start = SptDatetime(rt[0])
            stop = SptDatetime(rt[1])
            data = SPTDataReader(start_date=start, stop_date=stop, quiet=True)
            data.readData(start, stop, correct_global_pointing=False)
            datas.append(data)
            break  # Only read the first sub-interval of each time range.
            # make_map(start, stop)

    # coadd = sum(maps)
    # ma = MapAnalyzer(delta_ell=25, set_special_bb_bins = False)
    # cls = ma.calculateCls()
    # pl.plot(cls.ell.TT, cls.TT)
    # savefig('/home/ndhuang/plots/ra0hdec-57.5_fast_TT.png')
def consolidate_source_scan(start_date=None, stop_date=None, fitsdir='/data/sptdat/auxdata/',
                            sources_to_use=['rcw38', 'mat5a'], filename=None,
                            savename=None, doall=True, dosave=True):

    """
    Translated from RK's original consolidate_source_scan.pro.
    
    The purpose of this program is to consolidate the pointing-relevant
    information contained in the source_scan FITS files. The relevant info is
    stuffed into a dictionary and saved as a pkl file.

    ===Things that need to be saved in this pickle file===
    XDEG, YDEG, AMP, DC, ID
    STARTTIME,STARTTIME_MJD, STOPTIME, STOPTIME_MJD, MEAN_MJD
    SOURCE
    FILENAME
    TEMP_AVG, WIND_SPEED_AVG, WIND_DIR_AVG, PRESSURE_AVG, TIPPER_TAU_MEAN
    AZ_ACTUAL, EL_ACTUAL
    MEAN_AZ, MEAN_EL
    WNOISE, ELNOD_RESPONSE, ELNOD_SIGMA, CAL_RESPONSE, CAL_SIGMA
    LINEAR_SENSOR data (DET, DEL, DAZ, L1, L2, R1, R2)
    NSCANS, NFRAMES, NSAMPLES
    STRUCTURE THERMOMETRY
    ===

    Originally created by RK, 24 Sep 2008 in IDL.
    Translated to python by JWH, Oct 2012.
    """

    nbolo = 1599

    # Grab the filenames. Convert name strings to dates, and sort the filenames by date.
    filelist = np.array(tools.flatten([[glob(os.path.join(fitsdir, 'source/*'+srcname+'*.fits')) for srcname in sources_to_use], \
                                      [glob(os.path.join(fitsdir, 'source_vfast/*'+srcname+'*.fits')) for srcname in sources_to_use]]))
    filelist_times = time.filenameToTime(filelist)
    time_argsort = np.argsort(filelist_times)
    filelist = filelist[time_argsort]
    filelist_times = filelist_times[time_argsort]
    
    if start_date is None:
        start_date = filelist_times[0]
    else:
        start_date = time.SptDatetime(start_date)
    if stop_date is None:
        stop_date = filelist_times[-1]
    else:
        stop_date = time.SptDatetime(stop_date)
    
    #Figure out which filenames to read given the start and stop dates.
    times_to_use = np.array(map(lambda x: start_date <= x <= stop_date, filelist_times))
    print 'Consolidating source scans between ', start_date, ' and ', stop_date, '.'

    if not times_to_use.any():
        print 'There are no source scans in that date range...'
        return
    else:
        print 'There are %d source observations in that date range.' % np.sum(times_to_use)
        filelist = filelist[times_to_use]
    nlist = len(filelist)
    
    #Record the system time
    clocka = comptime()

    #Restore the old dictionary if you gave filename to the function.
    if filename is None:
        filename = '/home/jhenning/sptpol_code/sptpol_software/analysis/pointing/source_scan_structure.pkl'
    if savename is None:
        savename = filename

    if not doall:
        sold = pk.load(open(filename, 'rb'))

    
    #Create an empty list of dictionaries to fill with the data that we need.
    s = []
    for i in range(nlist):
        
        s.append({'xdeg':np.zeros(nbolo), 'ydeg':np.zeros(nbolo),
         'amp':np.zeros(nbolo), 'dc':np.zeros(nbolo), 'id':np.zeros(nbolo, dtype=np.int16),
         'starttime':'', 'starttime_mjd':0.0, 
         'stoptime':'', 'stoptime_mjd':0.0, 'mean_mjd':0.0, 
         'source':'', 'filename':'',
         'temp_avg':0.0, 'wind_speed_avg':0.0, 'wind_dir_avg':0.0, 'pressure_avg':0.0,
         'tipper_tau_mean':0.0,
         'az_actual':np.zeros(nbolo), 'el_actual':np.zeros(nbolo),
         'mean_az':0.0, 'mean_el':0.0,
         'focus_position':np.zeros(6),
         'wnoise':np.zeros(nbolo), 'elnod_response':np.zeros(nbolo), 'elnod_sigma':np.zeros(nbolo),
         'cal_response':np.zeros(nbolo), 'cal_sigma':np.zeros(nbolo),
         'med_daz':0.0, 'med_del':0.0, 'med_det':0.0,
         'med_l1':0.0, 'med_l2':0.0, 'med_r1':0.0, 'med_r2':0.0,
         'nscans':0.0, 'nframes':0.0, 'nsamples':0.0,
         'med_scu_temp':np.zeros(60), 'mean_scu_temp':np.zeros(60)})

    #Now loop over each source scan and grab each of these data.
    nnew=0
    for i in range(nlist):
        timeb = comptime()
        print str(i), '/', str(nlist-1)
        print '...checking ', filelist[i]

        #Check to see if the old dictionary list has this file's information.
        if not doall:
            wh_already = np.where(filelist[i] == np.array([item['filename'] for item in sold]))[0]
            if len(wh_already) > 1:
                print 'This file is in the source scan summary more than once!... '
                print 'Pausing so you can do something about it.'
                raw_input('Press ENTER to continue...')

            if len(wh_already)==1:
                print 'This file has already been incorporated into the source scan summary... '
                print 'Packing old results and moving to next source scan.'
                #Stuff the new dictionary with the old information.
                s[i] = sold[wh_already[0]]
                continue


        #Okay, let's start grabbing information from new source scans.
        nnew += 1

        data = 0  # Drop the previous observation's data before re-reading.
        print '...reading FITS file...'
        print '...loading ', filelist[i]
        data = files.read(filelist[i])
        #nframes = data.observation.n_frames
        #nscans = data.observation.n_scans
        #nsamples = data.observation.n_samples

        #For now, load in the data from the archive for the observation time to grab
        #raw pointing information etc.  Eventually, these should be saved with the source fits.
        #extra = SPTDataReader(start_date=time.SptDatetime(data.header.start_date), \
        #                      stop_date = time.SptDatetime(data.header.stop_date))
        #extra.readData(verbose=False, timestream_units='watts', correct_global_pointing=False)

        #Double-check that the data has as observation structure.
        if not hasattr(data, 'observation'):
            #print "   Missing an observation structure! Skipping observation %s." % data.header.start_date.arc
            print "   Missing an observation structure! Skipping observation %s." % data.from_filename
            nnew -= 1
            continue

        #Fill in auxiliary data for this scan.
        #Creat an SPTDataReader object to get ancillary information about the observation.
        da = SPTDataReader(start_date=data.header.start_date,
                           stop_date=data.header.stop_date, quiet=True)
        data.auxdata_keys = da.auxdata_keys
        data.rec = da.rec
        data.cuts = da.cuts
        data.telescope = da.telescope
        data.config = da.config
        try:
            #Let's not read in noise data.
            #files.fillAuxiliaryData(data, obs_types=['calibrator','elnod','noise'],
            files.fillAuxiliaryData(data, obs_types=['calibrator','elnod'],
                                    same_fridge_cycle=True, overwrite=False)
        except Exception, err:
            print "   Couldn't copy in auxiliary data: %s" % str(err)
            nnew -= 1
            continue
        
        #Finally, let's start pulling out some of the values we're looking for.
        xdeg = data.observation.bolo_x_offset
        ydeg = data.observation.bolo_y_offset
        bolo_id = data.observation.bolo_id_index_in_arc_file
        starttime = data.observation.start_time
        stoptime = data.observation.stop_time
        starttime_mjd = time.SptDatetime(starttime).mjd
        stoptime_mjd = time.SptDatetime(stoptime).mjd
        mean_mjd = np.mean([starttime_mjd, stoptime_mjd])

        #Is this a valid source scan?
        # NaN != NaN, so this selects bolometers with valid (non-NaN) offsets.
        wh = np.where(xdeg == xdeg)[0]
        nwh = len(wh)
        if nwh == 0:
            print "   Not a valid source scan. Skipping observation %s." % data.header.start_date.arc
            nnew -= 1
            continue
        
        #Make sure calibrator and noise auxiliary info is present.
        #Be sure to skip nan's otherwise np.std() will return nan no matter how many 
        #good bolo data there are.
        try:
            #if ( np.std(data.observation.bolo_cal_response[np.isfinite(data.observation.bolo_cal_response)]) == 0. or
            #     np.std(data.observation.bolo_psd_white_noise_level[np.isfinite(data.observation.bolo_psd_white_noise_level)] == 0.0)):
            if ( np.std(data.observation.bolo_cal_response[np.isfinite(data.observation.bolo_cal_response)]) == 0.):
                #print "   Missing calibrator or noise data. Skipping this observation."
                print "   Missing calibrator data. Not worrying about noise... Skipping this observation."
                nnew -= 1
                continue
        except AttributeError, err:
            # If we couldn't get calibrator and/or noise data, skip this observation.
            print err
            nnew -= 1
            continue
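For downstream use, the summary pickle this function writes can be restored with the standard pickle module; a minimal sketch, assuming the default path above and the field names listed in the docstring:

import pickle as pk

# The summary is a list of per-scan dictionaries.
s = pk.load(open('/home/jhenning/sptpol_code/sptpol_software/analysis/pointing/source_scan_structure.pkl', 'rb'))
print s[0]['source'], s[0]['mean_az'], s[0]['mean_el']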
Example #12
from sptpol_software.autotools.utils import mkdir_p
from sptpol_software.data.readout import SPTDataReader
from sptpol_software.util.time import SptDatetime
from pylab import *
import sys

sdt = SptDatetime
data = SPTDataReader(master_configfile='sptpol_stripped_master_config')
plotdir = '/home/ndhuang/code/test_plots/tracking/'
start = []
end = []
titles = []

# very old lead/trail
titles.append('Very Old L-T')
start.append(sdt('120429 04:05:31'))
end.append(sdt('120429 07:05:31'))

# old lead/trail
titles.append('Old L-T')
start.append(sdt('120521 10:17:19'))
end.append(sdt('120521 13:17:19'))

# old lead/trail
titles.append('Old L-T')
start.append(SptDatetime('120604 16:38:12'))
end.append(SptDatetime('120604 19:38:12'))

# last good lead/trail
titles.append('Last Good L-T')
start.append(SptDatetime('120616 00:08:10'))
Example #13
def analyzeData(time_interval, idf_band=None):
    """
    Nearly the generic function, but we return the result of the analysis function instead of 
    the data object that we read out.
    """
    # This is a bit of a kludge to let us analyze both 150 GHz and 90 GHz IDFs with the same script.
    # When the autoprocessor calls analyzeData, we'll get the default argument idf_band=None.
    # This bit of code will separately run the 150 GHz and 90 GHz mapmaking, then combine the
    # results and return them.
    if read_from_idf and idf_band is None:
        if do_left_right:
            # Do mapmaking for each of the bands in turn.
            analysis_result150, analysis_result150_left, analysis_result150_right = analyzeData(time_interval, idf_band=150)
            analysis_result90, analysis_result90_left, analysis_result90_right = analyzeData(time_interval, idf_band=90)
        
            # Combine the dictionaries of maps.
            analysis_result150.update(analysis_result90)
            analysis_result150_left.update(analysis_result90_left)
            analysis_result150_right.update(analysis_result90_right)
        
            # Return the combined results.
            return analysis_result150, analysis_result150_left, analysis_result150_right
                   
        else:
            # Do mapmaking for each of the bands in turn.
            analysis_result150 = analyzeData(time_interval, idf_band=150)
            analysis_result90 = analyzeData(time_interval, idf_band=90)
        
            # Combine the dictionaries of maps.
            analysis_result150.update(analysis_result90)
        
            # Return the combined results.
            return analysis_result150 


    # Check if we're reading from an IDF or going directly from archive files.
    if read_from_idf:
        if my_name == 'ra0hdec-57.5':
            print '2013 field...'
            idf_filename = os.path.join(directories['idf_dir'],'data','%s_idf_%s_%03ighz.h5' % (my_name, time_interval[0].file, idf_band))
        else:
            idf_filename = os.path.join(directories['idf_dir'],my_name,'data','%s_idf_%s_%03ighz.h5' % (my_name, time_interval[0].file, idf_band))
        data = read(idf_filename, timeit=True)
        if not data:
            raise ValueError("I couldn't find an IDF for the %s observation taken at %s. (No file at %s .)"
                             % (my_name, time_interval[0].archive, idf_filename))
        # test if data is in ra range you want
        if ra_cut:
            if (data.observation.mean_ra < ra_cut[0]) or (data.observation.mean_ra > ra_cut[1]):   
                raise ValueError("Skipping this IDF b/c wrong RA range: %s observation taken at %s. (file at %s .)"
                             % (my_name, time_interval[0].archive, idf_filename))

        data._readConfigs() # We need the auxdata_keys information.
        
        # The IDF generator may have flagged some scans in some timestreams as bad. 
        # With such a huge source, the RMS flagger's output is suspect (in fact, very 
        # bad over the source), so remove all timestream flags.
        for scan in data.scan:
            scan.is_bad_channel[:]=False

        #Correct pointing after the fact.
        if correct_pointing:
            op.applyOfflinePointing(data, model='SPT', overwrite_global=True,
                                flags=flags, overwrite=True)

        #Grab the thermometry and metrology data.
        thermo_data = {'tracker.encoder_off':np.median(data.antenna.track_enc_off, axis=1), \
                       'tracker.horiz_mount':np.median(data.antenna.track_hor_mnt, axis=1), \
                       'tracker.horiz_off':np.median(data.antenna.track_hor_off, axis=1), \
                       #'tracker.tilt_xy_avg':np.median(data.antenna.track_tilt_xy_avg, axis=1), \
                       'tracker.linear_sensor_avg':np.median(data.antenna.track_linsens_avg, axis=1), \
                       'scu.temp':np.median(data.antenna.scu_temp, axis=1),
                       'scu.benchoff':data.antenna.scu_benchoff[:,0],
                       'observation.temp_avg':data.observation.temp_avg,
                       'observation.pressure_avg':data.observation.pressure_avg,
                       'observation.mean_az':data.observation.mean_az,
                       'observation.mean_el':data.observation.mean_el,
                       'observation.wind_dir_avg':data.observation.wind_dir_avg,
                       'observation.wind_speed_avg':data.observation.wind_speed_avg
                       }

        this_filename = "%s_%s_thermolinear.pkl" % (my_name, time_interval[0].file)
        filename_out_dir = os.path.join(directories['output_dir'], my_name, subdirectory_name)
        filename = os.path.join(filename_out_dir, this_filename)

        pk.dump(thermo_data, open(filename, 'wb'))

    else:
        data = SPTDataReader(time_interval[0], time_interval[1], 
                             experiment=experiment,
                             master_configfile=master_config)
        data.readData(obstype=my_name, **readout_kwargs)

        #Correct pointing after the fact.
        if correct_pointing:
            op.applyOfflinePointing(data, model='SPT', overwrite_global=True,
                                flags=flags, overwrite=True)

        #Grab the thermometry and metrology data.
        thermo_data = {'tracker.encoder_off':np.median(data.antenna.track_enc_off, axis=1), \
                       'tracker.horiz_mount':np.median(data.antenna.track_hor_mnt, axis=1), \
                       'tracker.horiz_off':np.median(data.antenna.track_hor_off, axis=1), \
                       #'tracker.tilt_xy_avg':np.median(data.antenna.track_tilt_xy_avg, axis=1), \
                       'tracker.linear_sensor_avg':np.median(data.antenna.track_linsens_avg, axis=1), \
                       'scu.temp':np.median(data.antenna.scu_temp, axis=1),
                       'scu.benchoff':data.antenna.scu_benchoff[:,0],
                       'observation.temp_avg':data.observation.temp_avg,
                       'observation.pressure_avg':data.observation.pressure_avg,
                       'observation.mean_az':data.observation.mean_az,
                       'observation.mean_el':data.observation.mean_el,
                       'observation.wind_dir_avg':data.observation.wind_dir_avg,
                       'observation.wind_speed_avg':data.observation.wind_speed_avg
                       }

        this_filename = "%s_%s_thermolinear.pkl" % (my_name, time_interval[0].file)
        filename_out_dir = os.path.join(directories['output_dir'], my_name, subdirectory_name)
        filename = os.path.join(filename_out_dir, this_filename)

        pk.dump(thermo_data, open(filename, 'wb'))

    
    if mask_radius:
        if force_source_location:
            ra, dec = force_source_location[0], force_source_location[1]
        else:
            # Find the location of the source. Do a linear fit subtraction now. Make an IDF
            # for the sourcefinding, as a quick and easy way to be able to do light filtering
            # on the sourcefinding map without filtering the real data.
            temp_idf = (data.copy() if read_from_idf else mapidf.MapIDF(data))
            finder_map = quicklook.quickmap(temp_idf, good_bolos=['flagged','pointing'],
                                            reso_arcmin=0.25,
                                            proj=5,
                                            map_shape=[0.8,1.],
                                            map_center=map_center,
                                            t_only=True,
                                            inverse_noise_weighted_map=True,
                                            timestream_filtering={'poly_order':1, 
                                                                  'dynamic_source_mask':True,
                                                                  'outlier_mask_nsigma':3.0})
            temp_idf = None # Discard the temporary IDF now that we're done with it.
            # Discard the map we're not creating. We only need one of them.
            finder_map = finder_map[str(idf_band)] if idf_band is not None else finder_map['150'] 
        
            # Find the RA and dec of the maximum point in the map.
            ra, dec = finder_map.pix2Ang(np.unravel_index(np.argmax(np.abs(finder_map.map)), finder_map.shape))
    

        # Make a temporary pointsource config file, and write it in the arguments
        # to be passed to quickmap.
        ptsrc_configfile = tempfile.NamedTemporaryFile(mode='w', suffix='.txt')
        ptsrc_configfile.write('1 %f %f %f' % (ra, dec, mask_radius))
        ptsrc_configfile.flush()
        print "  Writing temporary pointsource config file, %s . Contents:" % ptsrc_configfile.name
        print '1 %f %f %f\n' % (ra, dec, mask_radius)
        analysis_function_kwargs['timestream_filtering']['pointsource_file'] = ptsrc_configfile.name 
 
        
    for func, kwargs in zip(preprocessing_function, preprocessing_function_kwargs):
        func(data, **kwargs)
        
    print "  Processing sum map.\n"   
    print "  analysis_function_kwargs : %s" % str(analysis_function_kwargs)
    analysis_result = analysis_function(data, **analysis_function_kwargs)
    
    if do_left_right:
        # Now we've done filtering once, no need to do it again.
        lr_analysis_function_kwargs = analysis_function_kwargs.copy() # Don't alter the original kwargs!
        lr_analysis_function_kwargs['timestream_filtering'] = {}
        
        print "  Processing leftgoing map.\n"   
        analysis_result_left = analysis_function(data, use_leftgoing=True, **lr_analysis_function_kwargs)
        
        print "  Processing rightgoing map.\n"   
        analysis_result_right = analysis_function(data, use_leftgoing=False, **lr_analysis_function_kwargs)
    
    if mask_radius:
        ptsrc_configfile.close()
    
    if read_from_idf:
        # If this is an IDF, it's only got one band. Delete maps from the other(s) so we
        # don't save empty maps.
        if str(data.band) not in analysis_result:
            raise RuntimeError("I was expecting to see %s in the output maps, but it's not there!" % str(data.band))
        for band in analysis_result.keys():
            if band!=str(data.band):
                del analysis_result[band]
                if do_left_right:
                    del analysis_result_left[band]
                    del analysis_result_right[band]
    
    if do_left_right:
        return analysis_result, analysis_result_left, analysis_result_right
    else:
        return analysis_result
Example #14
from datetime import datetime

from sptpol_software.data.readout import SPTDataReader
from sptpol_software.util.time import SptDatetime
from sptpol_software.analysis import processing
from sptpol_software.autotools import logs
readout_kwargs = {
    'timestream_units': 'Watts',  # Convert to T_CMB in preprocessing functions.
    'correct_global_pointing': True,
    'downsample_bolodata': 4,
    'project_iq': True,
    'exception_on_no_iq': True
}

for times in logs.readSourceScanTimes('20-Mar-2012',
                                      'now',
                                      'ra23h30dec-55',
                                      nscans_min=1):
    start = SptDatetime(times[0])
    end = SptDatetime(times[1])

    try:
        data = SPTDataReader(start, end)
        data.readData(**readout_kwargs)
        processing.notchFilter(data, verbose=True)
    except ValueError, err:
        print err
Example #15
import IPython

def psdAndCoadd(data, NFFT=64 * 1024, good_bolos=None):
    '''Average the PSDs of the given bolometers (all bolos by default).'''
    psd = np.zeros((NFFT / 2,))
    if good_bolos is None:
        good_bolos = data.rec.bolo_ids
    for bolo in good_bolos:
        p, freq = data.bolodata[bolo].psd(NFFT=NFFT)
        psd += p
    psd /= len(good_bolos)
    return psd, freq
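A usage sketch for psdAndCoadd, reusing the cut and reader calls from the __main__ block below (pl is pylab, as in the other examples):

good_bolos = getGoodBolos(data, good_bolos=['calibrator', 'elnod', 'flagged'])
psd, freq = psdAndCoadd(data, good_bolos=good_bolos)
pl.loglog(freq, psd)  # array-averaged bolometer power spectrum
pl.show()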
    
if __name__ == '__main__':
    from local_config import *
    for t in good_times:
        data = SPTDataReader(start_date=SptDatetime(t[0]),
                             stop_date=SptDatetime(t[1]), quiet=True)
        data.readData(correct_global_pointing=False, quiet=True)
        good_bolos = getGoodBolos(data, good_bolos=['calibrator', 'elnod', 'flagged'])
        if len(good_bolos) < 10:
            raise RuntimeError('We\'s f****d up the cuts!')
        
        # coadd timestreams
        good_ids = data.rec.boloListToIndex(good_bolos)
        coadd = np.mean(data.bolodata_array[good_ids], 0)
        # coadd = None
        # for gb in good_bolos:
        #     if coadd is None:
        #         coadd = data.bolodata[gb]
        #     else:
        #         coadd += data.bolodata[gb]
        # coadd /= len(good_bolos)
Example #16
def mk_plot(start, end, title, save=False):
    i = 0
    while i < len(dat_start) - 1:
        if dat_start[i] <= start and start < dat_start[i + 1]:
            if dat_start[i + 1] < end:
                raise RuntimeError(
                    "Sorry, we can't bridge good " + "vibes data files.\n" +
                    "%s is between %s and %s" %
                    (dat_start[i + 1].astimezone(sptz), start.astimezone(sptz),
                     end.astimezone(sptz)))
            gvdat = GoodVibeReader(dat_start[i].strftime('%Y%m%d_%H%M%S'),
                                   verbose=False)
            break
        if i == len(dat_start) - 2:
            raise RuntimeError("Sorry, we don't have data from %s yet" %
                               str(start.astimezone(sptz)))
        i += 1

    rxx = gvdat.rxx.get_section(start, end)
    rxy = gvdat.rxy.get_section(start, end)
    optics = gvdat.optics.get_section(start, end)
    all_gv = [rxx, rxy, optics]
    data = SPTDataReader(experiment='SPTpol',
                         verbose=False,
                         quiet=True,
                         master_configfile='sptpol_stripped_master_config')
    data.readData(start.astimezone(UTC),
                  end.astimezone(UTC),
                  verbose=False,
                  quiet=True,
                  process_psds=False,
                  remove_unknown_bolos=False,
                  correct_global_pointing=False)

    for b in all_gv:
        b.make_time()
        b.secs_mic = b.mic_t
        b.secs_accel = b.accel_t

    # prep data
    time = data.antenna.track_utc
    elenc1 = data.antenna.track_rawenc[0]
    elenc2 = data.antenna.track_rawenc[1]
    azerr = data.antenna.track_err[0]
    elerr = data.antenna.track_err[1]
    az = data.antenna.track_actual[0]
    el = data.antenna.track_actual[1]
    azrate = data.antenna.track_act_rate[0]
    elrate = data.antenna.track_act_rate[1]
    optics1 = data.antenna.scu_benchact[0]
    optics2 = data.antenna.scu_benchact[1]
    optics3 = data.antenna.scu_benchact[2]

    dt = np.diff(time)
    dt = map(timedelta.total_seconds, dt)
    azacc = np.diff(azrate) / dt
    elacc = np.diff(elrate) / dt

    # Decimate the accelerations by averaging in blocks of len_dec samples.
    len_dec = 50
    n_dec = int(np.ceil(float(len(azacc)) / len_dec))
    azacc_dec = np.empty(n_dec)
    elacc_dec = np.empty(n_dec)
    for i in range(n_dec - 1):
        azacc_dec[i] = np.mean(azacc[i * len_dec:(i + 1) * len_dec])
        elacc_dec[i] = np.mean(elacc[i * len_dec:(i + 1) * len_dec])

    azacc_dec[n_dec - 1] = np.mean(azacc[(n_dec - 1) * len_dec:])
    elacc_dec[n_dec - 1] = np.mean(elacc[(n_dec - 1) * len_dec:])

    azacc = azacc_dec
    elacc = elacc_dec
    acc_inds = range(0, len(time), len_dec)

    # mic
    sp = 1
    tsp = 12
    fig = pl.figure(figsize=(8, tsp / 3 * 6))
    pl.subplot(tsp, 1, sp, title='X Mic')
    pl.plot(rxx.secs_mic, rxx.mic, label='rx_x')
    pl.xlim(min(rxx.secs_mic), max(rxx.secs_mic))
    sp += 1

    pl.subplot(tsp, 1, sp, title='Y Mic')
    pl.plot(rxy.secs_mic, rxy.mic, label='rx_y')
    pl.xlim(min(rxy.secs_mic), max(rxy.secs_mic))
    sp += 1

    # accel
    pl.subplot(tsp, 1, sp, title='X Accelerometer')
    pl.plot(rxx.secs_accel, rxx.accel, label='rx_x')
    pl.xlim(min(rxx.secs_accel), max(rxx.secs_accel))
    sp += 1

    pl.subplot(tsp, 1, sp, title='Y Accelerometer')
    pl.plot(rxy.secs_accel, rxy.accel, label='rx_y')
    pl.xlim(min(rxy.secs_accel), max(rxy.secs_accel))
    sp += 1

    # az and el
    pl.subplot(tsp, 1, sp, title='Actual Az')
    pl.plot(time, az, label='Az')
    sp += 1

    pl.subplot(tsp, 1, sp, title='Actual El')
    pl.plot(time, el, label='El')
    sp += 1

    # acc
    pl.subplot(tsp, 1, sp, title='Az Acceleration')
    pl.plot(time[acc_inds], azacc)
    sp += 1

    pl.subplot(tsp, 1, sp, title='El Acceleration')
    pl.plot(time[acc_inds], elacc)
    sp += 1

    # tracker errors
    pl.subplot(tsp, 1, sp, title='Az and El Error')
    pl.plot(time, azerr, label='Az')
    pl.plot(time, elerr, label='El')
    pl.legend()
    sp += 1

    # optics bench
    pl.subplot(tsp, 1, sp, title='Optics Mic')
    pl.plot(optics.secs_mic, optics.mic, label='optics')
    pl.xlim(min(optics.secs_mic), max(optics.secs_mic))
    sp += 1

    pl.subplot(tsp, 1, sp, title='Optics Bench Accelerometer')
    pl.plot(optics.secs_accel, optics.accel, label='optics')
    pl.xlim(min(optics.secs_accel), max(optics.secs_accel))
    sp += 1

    pl.subplot(tsp, 1, sp, title='Optics Bench Position')
    pl.plot(time, optics1, label='Optics 1')
    pl.plot(time, optics2, label='Optics 2')
    pl.plot(time, optics3, label='Optics 3')
    pl.legend()
    sp += 1

    pl.tight_layout()
    # fig.autofmt_xdate()
    if save:
        pl.savefig(title + 'all.png')

    # # az and el
    # fig = pl.figure()
    # pl.subplot(211, title = 'Actual Az')
    # pl.plot(time, az)

    # pl.subplot(212, title = 'Actual El')
    # pl.plot(time, el)
    # pl.tight_layout()
    # fig.autofmt_xdate()
    # if save:
    #     pl.savefig(title + 'az-el.png')

    # # tracker errors
    # fig = pl.figure()
    # pl.subplot(211, title = 'Az Errors')
    # pl.plot(time, azerr)
    # pl.ylim(-.05, .05)

    # pl.subplot(212, title = 'El Errors')
    # pl.plot(time, elerr)
    # pl.ylim(-.06, .06)
    # pl.tight_layout()
    # fig.autofmt_xdate()
    # if save:
    #     pl.savefig(title + 'tracker-errors.png')

    # # az and el rates
    # fig = pl.figure()
    # pl.subplot(211, title = 'Az rate')
    # pl.plot(time, azrate)

    # pl.subplot(212, title = 'El rate')
    # pl.plot(time, elrate)
    # pl.tight_layout()
    # fig.autofmt_xdate()
    # if save:
    #     pl.savefig(title + 'az-el-rate.png')

    if not save:
        pl.show()

    pl.close('all')
Example #17
def acc_corr_plots(start, end, fig, save=False):
    i = 0
    while i < len(dat_start) - 1:
        if dat_start[i] <= start and start < dat_start[i + 1]:
            if dat_start[i + 1] < end:
                raise RuntimeError(
                    "Sorry, we can't bridge good " + "vibes data files.\n" +
                    "%s is between %s and %s" %
                    (dat_start[i + 1].astimezone(sptz), start.astimezone(sptz),
                     end.astimezone(sptz)))
            gvdat = GoodVibeReader(dat_start[i].strftime('%Y%m%d_%H%M%S'),
                                   verbose=False)
            break
        if i == len(dat_start) - 2:
            raise RuntimeError("Sorry, we don't have data from %s yet" %
                               str(start.astimezone(sptz)))
        i += 1

    rxx = gvdat.rxx.get_section(start, end)
    rxy = gvdat.rxy.get_section(start, end)
    optics = gvdat.optics.get_section(start, end)
    all_gv = [rxx, rxy, optics]
    data = SPTDataReader(experiment='SPTpol',
                         verbose=False,
                         quiet=True,
                         master_configfile='sptpol_stripped_master_config')
    data.readData(start.astimezone(UTC),
                  end.astimezone(UTC),
                  verbose=False,
                  quiet=True,
                  process_psds=False,
                  remove_unknown_bolos=False,
                  correct_global_pointing=False)

    for b in all_gv:
        b.make_time()
        b.secs_mic = b.mic_t
        b.secs_accel = b.accel_t

    # prep data
    time = data.antenna.track_utc
    elenc1 = data.antenna.track_rawenc[0]
    elenc2 = data.antenna.track_rawenc[1]
    azerr = data.antenna.track_err[0]
    elerr = data.antenna.track_err[1]
    az = data.antenna.track_actual[0]
    el = data.antenna.track_actual[1]
    azrate = data.antenna.track_act_rate[0]
    elrate = data.antenna.track_act_rate[1]
    optics1 = data.antenna.scu_benchact[0]
    optics2 = data.antenna.scu_benchact[1]
    optics3 = data.antenna.scu_benchact[2]

    dt = np.diff(time)
    dt = map(timedelta.total_seconds, dt)
    azacc = np.diff(azrate) / dt
    elacc = np.diff(elrate) / dt

    len_dec = 50
    n_dec = int(np.ceil(float(len(azacc)) / len_dec))
    azacc_dec = np.empty(n_dec)
    elacc_dec = np.empty(n_dec)

    # The accelerometers sample at a different rate; use a float block length
    # so they decimate to the same n_dec bins as the encoder-derived rates.
    len_dec_accel = float(len(rxx.accel)) / n_dec
    x_accel_dec = np.empty(n_dec)
    y_accel_dec = np.empty(n_dec)
    optics_accel_dec = np.empty(n_dec)
    for i in range(n_dec - 1):
        azacc_dec[i] = np.mean(azacc[i * len_dec:(i + 1) * len_dec])
        elacc_dec[i] = np.mean(elacc[i * len_dec:(i + 1) * len_dec])
        x_accel_dec[i] = np.mean(
            rxx.accel[int(i * len_dec_accel):int((i + 1) * len_dec_accel)])
        y_accel_dec[i] = np.mean(
            rxy.accel[int(i * len_dec_accel):int((i + 1) * len_dec_accel)])
        optics_accel_dec[i] = np.mean(
            optics.accel[int(i * len_dec_accel):int((i + 1) * len_dec_accel)])

    azacc_dec[n_dec - 1] = np.mean(azacc[(n_dec - 1) * len_dec:])
    elacc_dec[n_dec - 1] = np.mean(elacc[(n_dec - 1) * len_dec:])

    x_accel_dec[n_dec - 1] = np.mean(
        rxx.accel[int((n_dec - 1) * len_dec_accel):])
    y_accel_dec[n_dec - 1] = np.mean(
        rxy.accel[int((n_dec - 1) * len_dec_accel):])
    optics_accel_dec[n_dec - 1] = np.mean(
        optics.accel[int((n_dec - 1) * len_dec_accel):])

    x_accel_dec -= np.mean(x_accel_dec)
    y_accel_dec -= np.mean(y_accel_dec)
    optics_accel_dec -= np.mean(optics_accel_dec)

    azacc = azacc_dec
    elacc = elacc_dec
    acc_inds = range(0, len(time), len_dec)

    accel_inds = range(0, len(rxx.accel), len_dec)

    pl.figure(fig.number)
    pl.subplot(321)
    pl.plot(azacc, x_accel_dec, 'b,')
    pl.ylabel('X Accelerometer')
    pl.subplot(322)
    pl.plot(elacc, x_accel_dec, 'b,')

    pl.subplot(323)
    pl.plot(azacc, y_accel_dec, 'b,')
    pl.ylabel('Y Accelerometer')
    pl.subplot(324)
    pl.plot(elacc, y_accel_dec, 'b,')

    pl.subplot(325)
    pl.plot(azacc, optics_accel_dec, 'b,')
    pl.ylabel('Optics Accel')
    pl.xlabel('Az Acceleration')
    pl.subplot(326)
    pl.plot(elacc, optics_accel_dec, 'b,')
    pl.xlabel('El Acceleration')

    return fig
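A hypothetical driver: because acc_corr_plots takes and returns a figure, several intervals can be overplotted on the same six correlation panels:

fig = pl.figure(figsize=(8, 10))
for s, e in intervals:  # hypothetical list of (start, end) datetimes
    fig = acc_corr_plots(s, e, fig)
pl.show()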
Example #18
        3: '30% Reduction'
    }
    plot_dir = '/home/ndhuang/plots/scan_profiles/stacks/'
    plot_fmt = 'eps'
    f = open(
        '/home/ndhuang/spt_code/sptpol_software/scratch/ndhuang/scan_maker/el_tests_by_scan.pkl',
        'r')
    times = pickle.load(f)
    f.close()

    start = '140504 13:36:38'
    stop = '140504 17:01:57'
    null = open('/dev/null', 'w')
    data = [
        SPTDataReader(start,
                      stop,
                      quiet=True,
                      master_configfile='sptpol_stripped_master_config')
        for i in range(4)
    ]
    # data = SPTDataReader(start, stop, quiet = True,
    #                      master_configfile='sptpol_stripped_master_config')
    skip = False
    for scan in times:
        print '==============================%s====================' % scan
        # method verification
        # start = times[scan][0][0]
        # stop  = times[scan][0][1]
        # data.readData(start, stop, standardize_samplerates = False,
        #               correct_global_pointing = False)
        # el = data.antenna.track_actual[1]
        # el_err = data.antenna.track_err[1]