Example #1
def create_mosaic(flag,target_scan_list,output_dir,cal_scan,waveband,mf):
    '''Creates a mosaic map/cube that combines individual scan maps/timing cubes for a
    SCUBA-2 observation. The following procedure is used:
    (1) Searches for all the calibrated target scan maps or timing cubes
    (2) Creates a list with the names that meet step (1) criteria, and writes them to a file
    (3) Reads the header keys 'LBOUND1,2,3' and 'NAXIS1,2,3' to obtain the
        size limits of the maps to be mosaiced
    (4) Runs kappa.wcsmosaic, producing a combined version of the individual maps/cubes of each scan
    INPUT:  flag - indicates whether the function is applied to the cropped map or the timing cube (str; 'scan' or 'cube')
            target_scan_list - list of strings with the names of target scan numbers
            output_dir - path to directory where the data products are stored (str)
            cal_scan - calibration scan number or 'default' if auto calibration selected (str)
            waveband - '8' or '4' for 850um or 450um (str)
            mf - flag to indicate if mosaicing maps with matched filter applied ('y' or 'n'; str)
    OUTPUT: Nothing returned, but function creates,
            - mosaiced map/cube (sdf and fits files)
    '''
    final_outputdir=output_dir+'/data_products/'
    output_dir2 = output_dir+'calibrate_'+cal_scan+'/'
    # Get the name of cropped files
    lsts=[]
    if flag == 'scan':
        if mf=='n':
            names_mosaic0 = glob.glob(output_dir2+'*'+waveband+'_cal_crop.sdf')
            outs=output_dir2+'mosaic_map_'+flag+'_'+waveband
        elif mf=='y':
            names_mosaic0 = glob.glob(output_dir2+'*'+waveband+'_cal_crop_mf.sdf')
            outs=output_dir2+'mosaic_map_'+flag+'_'+waveband+'_mf'
    if flag == 'cube':
        names_mosaic0 = glob.glob(output_dir2+'*'+waveband+'_shortmp_cube_cal.sdf')
        outs=output_dir2+'mosaic_map_'+flag+'_'+waveband
    names_mosaic = []
        
    for item in names_mosaic0:
        if any(x in item for x in target_scan_list):
            names_mosaic.append(item)
                
    names_mosaic.sort()
    # Write names into a file
    fileo = open(output_dir2+'mylist_mosaic_'+flag+'_'+waveband+'.lst','w') 
    for name in names_mosaic:
        fileo.write('{0}\n'.format(name))
    fileo.close()
    # Get image size, first convert names_mosaic into .fits file
    fitshdr=fits.open(names_mosaic[0].split('.sdf')[0]+'.fits')[0].header
    lbound1 = int(fitshdr['LBOUND1'])
    lbound2 = int(fitshdr['LBOUND2'])
    lbound3 = int(fitshdr['LBOUND3'])
    naxis1 = int(fitshdr['NAXIS1'])
    naxis2 = int(fitshdr['NAXIS2'])
    naxis3 = int(fitshdr['NAXIS3'])

    # Run wcsmosaic
    kappa.wcsmosaic(in_='^'+output_dir2+'mylist_mosaic_'+flag+'_'+waveband+'.lst', out = outs, lbnd = [lbound1,lbound2,lbound3] , ubnd = [lbound1+naxis1,lbound2+naxis2,lbound3+naxis3],ref = names_mosaic[0])
    # Convert to fits
    convert.ndf2fits(in_=outs+'.sdf',out = outs+'.fits')
    #copy all final processed maps to the data_products directory
    if cal_scan=='default':
        os.system('cp -r '+outs+'.fits '+final_outputdir)
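
# Usage sketch (hypothetical paths and scan numbers, shown only to illustrate
# the expected argument types):
#
#   create_mosaic(flag='scan', target_scan_list=['32', '33'],
#                 output_dir='/data/reduction/', cal_scan='default',
#                 waveband='8', mf='n')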
Example #2
def create_map_fits(num,configfile,cal_scan,output_dir,data_dir,crop_file,waveband):
    '''Creates a calibrated, cropped map, with and without matched filtering, of a SCUBA-2 scan. The following data reduction
    commands are performed in starlink:
    (1) Checks if in the data_products directory there is an existing folder 
       for each calibrator scan number or 'default' in the case of auto calibration.
       If not, it creates the folder
    (2) Creates a list with the names of raw data by calling create_list function
    (3) Runs smurf.makemap on the raw data
    (4) Gets the FCF value for the given cal_scan number or uses default values in case of auto calibration.
    (5) Runs kappa.cmult to apply the FCF correction to the map, converting pW to
       Jy/beam. The output is a new file with '_cal' at the end, indicating that
       the FCF was applied.
    (6) Runs picard.crop_scuba2_images on the output of step (5) to crop the map
    (7) Converts the output of step 6 into fits files

    INPUT:  num - target scan number (str)
            configfile - name of configuration file WITH full path (str)
            cal_scan - calibration scan number or 'default' if auto calibration selected (str)
            output_dir -  path to directory where the data products are stored (str)
            data_dir - path to directory where the raw data is stored (str)
            crop_file - cropping parameter file (str)
            waveband - '8' or '4' for 850um or 450um (str)
    OUTPUT: Nothing returned, but function creates,
            - Calibrated and cropped map files (sdf and fits)
            - Calibrated and cropped map files with matched filter applied (sdf and fits)
    '''
    final_outputdir=output_dir+'/data_products/'
    # Check if directory of cal_scan exists already, if not, create directory
    if not os.path.isdir(output_dir+'calibrate_'+cal_scan):
        os.mkdir(output_dir+'calibrate_'+cal_scan)
    output_dir2 = output_dir+'calibrate_'+cal_scan+'/'
    # Create list
    create_list(num,data_dir,output_dir,cal_scan,waveband)
    # create map
    smurf.makemap(in_='^'+output_dir2+'mylist'+num+'_'+waveband+'.lst',out=output_dir2+'scan_'+num+'_'+waveband,config = '^'+configfile)  
    # get FCF value (Jy/beam/pW)
    if cal_scan=='default' and waveband=='8':
        fcf_val= 537
    elif cal_scan=='default' and waveband=='4':
        fcf_val= 491
    else:
        fcf_val = FCF(output_dir2+'scan_'+cal_scan+'_'+waveband+'.sdf')
    # run cmult
    kappa.cmult(in_=output_dir2+'scan_'+num+'_'+waveband+'.sdf',scalar = fcf_val, out = output_dir2+'scan_'+num+'_'+waveband+'_cal')
    # cropping map
    cropped_map = picard.crop_scuba2_images([output_dir2+'scan_'+num+'_'+waveband+'_cal.sdf'],recpars = crop_file)
    # move the cropped map into the calibration scan directory
    os.system('mv '+cropped_map.datafiles[0]+' '+output_dir2)
    #make a matched filter version
    mf_map=picard.scuba2_matched_filter([output_dir2+'scan_'+num+'_'+waveband+'_cal_crop.sdf'])
    os.system('mv '+[item for item in mf_map.datafiles if '_mf' in item][0]+' '+output_dir2+'scan_'+num+'_'+waveband+'_cal_crop_mf.sdf')
    # create fits file
    convert.ndf2fits(in_= output_dir2+'scan_'+num+'_'+waveband+'_cal_crop.sdf',out = output_dir2+'scan_'+num+'_'+waveband+'_cal_crop.fits')
    convert.ndf2fits(in_= output_dir2+'scan_'+num+'_'+waveband+'_cal_crop_mf.sdf',out = output_dir2+'scan_'+num+'_'+waveband+'_cal_crop_mf.fits')
    #copy all final processed maps to the data_products directory
    if cal_scan=='default':
        os.system('cp -r '+output_dir2+'scan_'+num+'_'+waveband+'_cal_crop.fits'+' '+final_outputdir)
        os.system('cp -r '+output_dir2+'scan_'+num+'_'+waveband+'_cal_crop_mf.fits'+' '+final_outputdir)
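
# Usage sketch (hypothetical paths and scan numbers; configfile and crop_file
# must be given with their full paths):
#
#   create_map_fits(num='32', configfile='/data/config/dimmconfig.lis',
#                   cal_scan='default', output_dir='/data/reduction/',
#                   data_dir='/data/raw/', crop_file='/data/config/crop.lis',
#                   waveband='8')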
Example #3
def create_pol_timing(nums,data_dir,output_dir,date,configfile,waveband):
    '''Creates calibrated I,Q,U timing cubes of a POL-2 scan. The following data reduction
    commands are performed in starlink:
    (1) Assumes the calibrate_default folder and the raw-data list file
        ('mylistall_<waveband>.lst') already exist, as created by the
        corresponding map-making step
    (2) Runs smurf.pol2map on the raw polarization data to create a 
        total intensity cube in pW. It is run again, with the first I cube as input, to produce the
        final I, Q and U cubes.
    (3) Runs kappa.cmult to apply default FCFs to the IQU cubes (individual scans only)
    (4) IQU cubes are converted to fits files (individual scans only)
    INPUT:  nums - list of strings of target scan numbers
            data_dir - path to directory where raw data is stored (str)
            output_dir - path to directory where the data products are stored (str)
            date - date of observation (YYYYMMDD, str)
            configfile - name of configuration file WITH full path (str)
            waveband - '8' or '4' for 850um or 450um (str)
    OUTPUT: Nothing returned, but function creates,
            - Calibrated I,Q,U timing cubes (individual scans only) 
    '''
    final_outputdir=output_dir+'/data_products/'
    # default POL-2 FCFs (Jy/beam/pW)
    if waveband=='8':
        fcf_val=725
    elif waveband=='4':
        fcf_val=962
    output_dir2 = output_dir+'calibrate_default/'
    smurf.pol2map(in_ = '^'+output_dir2+'mylistall_'+waveband+'.lst', iout = output_dir2+'stokes_icube_'+waveband+'/'+'mosaic_Imap',qout='!',\
        uout='!', mapdir = output_dir2+'stokes_icube_'+waveband, qudir = output_dir2+'stokes_qucube_'+waveband,jy=True, fcf='!',skyloop=False,\
        config = '^'+configfile)
    smurf.pol2map(in_=output_dir2+'stokes_qucube_'+waveband+'/*',iout = output_dir2+'stokes_icube_'+waveband+'/'+'mosaic_Imap_2',
              qout=output_dir2+'stokes_qucube_'+waveband+'/'+'mosaic_Qmap', uout=output_dir2+'stokes_qucube_'+waveband+'/'+'mosaic_Umap',
              mapdir=output_dir2+'stokes_icube_'+waveband+'/',mask=output_dir2+'stokes_icube_'+waveband+'/'+'mosaic_Imap',\
              maskout1=output_dir2+'stokes_icube_'+waveband+'/'+'astmask',
              maskout2=output_dir2+'stokes_icube_'+waveband+'/'+'pcamask',ipref=output_dir2+'stokes_icube_'+waveband+'/'+'mosaic_Imap_2',
              cat=output_dir2+'stokes_qucube_'+waveband+'/'+'mycat',debias=True,jy=True, fcf='!',skyloop=False,\
              config =  '^'+configfile)
    #calibrate and convert all output I,Q,U maps (only indv scans have cubes) to fits
    for n in nums:
        #need to cut name length down as starlink gives truncation errors for stackframes task!
        for i in ['I','Q','U']:
            os.system('mv '+output_dir2+'stokes_icube_'+waveband+'/'+date+'_000'+n+'*_'+i+'map.sdf'+' '+output_dir2+'stokes_icube_'+waveband+'/'+'scan_'+n+'_'+i+'map.sdf')
        list_pol=[glob.glob(output_dir2+'stokes_icube_'+waveband+'/'+'scan_'+n+'_'+i+'map.sdf')[0] for i in ['I','Q','U']]
        for item in list_pol:
            # note: use split('.sdf')[0] rather than strip('.sdf'), since str.strip
            # removes a set of characters from both ends, not a suffix
            smurf.stackframes(in_ = item.split('.sdf')[0]+'.more.smurf.shortmaps',out = item.split('.sdf')[0]+'_cube', sort = False, sortby = '')
            kappa.cmult(in_=item.split('.sdf')[0]+'_cube.sdf',scalar = fcf_val, out = item.split('.sdf')[0]+'_cube_cal.sdf')
            convert.ndf2fits(in_=item.split('.sdf')[0]+'_cube_cal.sdf',out = item.split('.sdf')[0]+'_cube_cal.fits')
            #copy all final processed maps to the data_products directory
            os.system('cp -r '+item.split('.sdf')[0]+'_cube_cal.fits '+final_outputdir)
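
# Usage sketch (hypothetical values; assumes the raw-data list
# 'mylistall_8.lst' was already written into calibrate_default/):
#
#   create_pol_timing(nums=['32', '33'], data_dir='/data/raw/',
#                     output_dir='/data/reduction/', date='20230101',
#                     configfile='/data/config/dimmconfig_pol2.lis', waveband='8')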
Example #4
def timing_cube(num,configfile,cal_scan,output_dir,waveband):
    '''Creates a calibrated timing cube (RA,DEC,time) of a SCUBA-2 scan. The following data reduction
    commands are performed in starlink:
    (1) Checks if in the data_products directory there is an existing folder 
       for each calibrator scan number or 'default' in the case of auto calibration.
       If not, it creates the folder
    (2) Runs smurf.makemap on the raw data for the specified scan (the raw data list file was already
        created in the full scan map function)
    (3) Runs smurf.stackframes to create a cube
    (4) Gets the FCF value for the given cal_scan number or uses default values in case of auto calibration.
    (5) Runs kappa.cmult to apply the FCF correction to the map, converting pW 
        to Jy/beam. The output is a new file with '_shortmp_cube_cal' at the 
        end, indicating that the FCF was applied.
    (6) Converts the output of step 5 into fits files
    INPUT:  num - target scan number (str)
            configfile - name of configuration file, WITH the full path (str)
            cal_scan - calibration scan number or 'default' if auto calibration selected (str)
            output_dir - path to directory where the data products are stored (str)
            waveband - '8' or '4' for 850um or 450um (str)
    OUTPUT: Nothing returned, but function creates,
            - Timing cube (RA,DEC,time; sdf and fits files)
    '''
    final_outputdir=output_dir+'/data_products/'
    # Check if directory of cal_scan exists already, if not, create directory
    if not os.path.isdir(output_dir+'calibrate_'+cal_scan):
        os.mkdir(output_dir+'calibrate_'+cal_scan)
    output_dir2 = output_dir+'calibrate_'+cal_scan+'/'
    # making timing cube 
    smurf.makemap(in_='^'+output_dir2+'mylist'+num+'_'+waveband+'.lst',out=output_dir2+'scan_'+num+'_'+waveband+'_shortmp',config = '^'+configfile)
    # stacking frames
    smurf.stackframes(in_ = output_dir2+'scan_'+num+'_'+waveband+'_shortmp.more.smurf.shortmaps', out = output_dir2+'scan_'+num+'_'+waveband+'_shortmp_cube', sort = False, sortby = '')
    # get FCF value (Jy/beam/pW)
    if cal_scan=='default' and waveband=='8':
        fcf_val= 537
    elif cal_scan=='default' and waveband=='4':
        fcf_val= 491
    else:
        fcf_val = FCF(output_dir2+'scan_'+cal_scan+'_'+waveband+'.sdf')
    # run cmult
    kappa.cmult(in_=output_dir2+'scan_'+num+'_'+waveband+'_shortmp_cube.sdf',scalar = fcf_val, out = output_dir2+'scan_'+num+'_'+waveband+'_shortmp_cube_cal')
    # create .fits file
    convert.ndf2fits(in_=output_dir2+'scan_'+num+'_'+waveband+'_shortmp_cube_cal.sdf',out = output_dir2+'scan_'+num+'_'+waveband+'_shortmp_cube_cal.fits')
    #copy all final processed maps to the data_products directory
    if cal_scan=='default':
        os.system('cp -r '+ output_dir2+'scan_'+num+'_'+waveband+'_shortmp_cube_cal.fits '+final_outputdir)
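
# Usage sketch (hypothetical values; assumes the raw-data list for scan '32'
# was already written by the full scan-map step):
#
#   timing_cube(num='32', configfile='/data/config/dimmconfig.lis',
#               cal_scan='default', output_dir='/data/reduction/', waveband='8')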
Example #5
def make_data_dict(regions=['DR21C'],datadirs=['DR21C'],alignment_iteration=0,DIST=7,length=200,kernel_sigma=6,wavelength = '850'):
    """
    :param regions: a list of regions to run
    :param datadirs: a list of directories that hold the data for each region listed in parameter regions.
    :param alignment_iteration: there will be multiple iterations of the alignment run - this 0-based integer designates which alignment iteration the output file describes
    :param DIST: the maximum radius (in pixels) out to which the linear fit to the autocorrelation is performed
    :param length: the half-width (in pixels) of the region extracted around the map centre (extracted width = length*2 + 1)
    :param kernel_sigma: the width (in pixels) of the smoothing kernel used to subtract large-scale structure (high-pass filter)
    :param wavelength: '450' or '850' (str)
    """
    # + ===================== +
    # | Global parameters     |
    # + ===================== +
    tol = 0.05
    
    REGIONS = {}
    for i in regions:
        REGIONS[i] = {wavelength:1}

    align_smooth_kernel = Gaussian2DKernel(x_stddev=kernel_sigma, y_stddev=kernel_sigma)
    
    data = defaultdict(dict)
    
    for region,datadir in zip(regions,datadirs):
        data[region] = defaultdict(dict)
        Dates850 = []
        Dates450 = []
        DataRoot = datadir + "/"  # where all the data is stored
        files = []
        for eachfile in os.listdir(DataRoot):
            if os.path.isfile(os.path.join(DataRoot, eachfile)):
                if eachfile.split('.')[-1] == 'sdf':
                    if wavelength in eachfile:
                        files.append(eachfile)
        #files = [f for f in os.listdir(DataRoot) if (os.path.isfile(os.path.join(DataRoot, f)) and
        #                                             os.path.join(DataRoot, f)[-4:] ==".sdf")]  # all the files in dir for this wavelength
        files = sorted(files)  # sorting to ensure we select the correct first region
    
        if wavelength == '450':
            scale = 2
        elif wavelength == '850':
            scale = 3
        else:
            scale = 0
        data[region][wavelength] = defaultdict(dict)
        data[region][wavelength]['epoch'] = defaultdict(list)
    
        data[region][wavelength]['dates'] = list()  # to collect all of the dates in the data[region] set
        data[region][wavelength]['JCMT_offset'] = defaultdict(str)  # to use the date as the index
        data[region][wavelength]['header'] = defaultdict(dict)
    
        data[region][wavelength]['XC'] = defaultdict(dict)
        data[region][wavelength]['XC']['offset'] = defaultdict(list)
        data[region][wavelength]['XC']['offset_err'] = defaultdict(list)
        data[region][wavelength]['XC']['alignment'] = defaultdict(list)
    
        data[region][wavelength]['linear'] = defaultdict(dict)
        data[region][wavelength]['linear']['m'] = defaultdict(dict)
        data[region][wavelength]['linear']['m_err'] = defaultdict(dict)
        data[region][wavelength]['linear']['b'] = defaultdict(dict)
        data[region][wavelength]['linear']['b_err'] = defaultdict(dict)
    
        data[region][wavelength]['AC'] = defaultdict(dict)
        data[region][wavelength]['AC']['beam'] = defaultdict(list)
        data[region][wavelength]['AC']['amp'] = defaultdict(list)
        data[region][wavelength]['AC']['amp_err'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_x'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_x_err'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_y'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_y_err'] = defaultdict(list)
        data[region][wavelength]['AC']['theta'] = defaultdict(list)
        data[region][wavelength]['AC']['theta_err'] = defaultdict(list)
        
    
        FEN = files[0]
        FilePath = datadir + "/" + FEN
        OutPath = datadir + "/" + FEN.split('.sdf')[0] + ".fit"
        if os.path.isfile(OutPath):
            pass
        else:
            convert.ndf2fits(FilePath, OutPath)
        FilePath = OutPath
        print('\n\nFIRST EPOCH: '+FilePath+'\n\n')
        FirstEpoch = fits.open(FilePath)  # opening the file in astropy
        FirstEpochData = FirstEpoch[0].data[0]  # Numpy data array for the first epoch
        FirstEpochCentre = np.array([FirstEpoch[0].header['CRPIX1'], FirstEpoch[0].header['CRPIX2']])
    
        # middle of the map of the first epoch
        FED_MidMapX = FirstEpochData.shape[1] // 2
        FED_MidMapY = FirstEpochData.shape[0] // 2
        FirstEpochVec = np.array([FirstEpochCentre[0] - FED_MidMapX,
                                  FirstEpochCentre[1] - FED_MidMapY])
        FirstEpochData = FirstEpochData[
                         FED_MidMapY - length:FED_MidMapY + length + 1,
                         FED_MidMapX - length:FED_MidMapX + length + 1]
        FirstEpochData_smooth = convolve(FirstEpochData, align_smooth_kernel, normalize_kernel=False)
        FirstEpochData -= FirstEpochData_smooth
        for fn in files:
            if wavelength in fn:
                FilePath = datadir + "/" + fn
    
                tau225_start = float(kappa.fitsval(FilePath, 'WVMTAUST').value)
                tau225_end = float(kappa.fitsval(FilePath, 'WVMTAUEN').value)
                tau225 = sum([tau225_start, tau225_end]) / 2
    
                AirMass_start = float(kappa.fitsval(FilePath, 'AMSTART').value)
                AirMass_end = float(kappa.fitsval(FilePath, 'AMEND').value)
                AirMass = sum([AirMass_start, AirMass_end]) / 2
    
                elev_start = float(kappa.fitsval(FilePath, 'ELSTART').value)
                elev_end = float(kappa.fitsval(FilePath, 'ELEND').value)
                elev = int(round(sum([elev_start, elev_end]) / 2, 0))
    
                OutPath = datadir + "/" + fn[:-4] + ".fit"
    
                if os.path.isfile(OutPath):
                    pass
                else:
                    convert.ndf2fits(FilePath, OutPath)
    
                FilePath = OutPath
                hdul = fits.open(FilePath)  # opening the file in astropy
                date = ''.join(str(hdul[0].header['DATE-OBS']).split('T')[0].split('-'))  # extract date from the header
                date += '-' + str(hdul[0].header['OBSNUM'])
                JulianDate = str(float(hdul[0].header['MJD-OBS']) + 2400000.5)
                print('Epoch: {:14}'.format(date))
                data[region][wavelength]['header']['airmass'][date] = AirMass
                data[region][wavelength]['header']['t225'][date] = tau225
                data[region][wavelength]['header']['julian_date'][date] = JulianDate
                data[region][wavelength]['header']['elevation'][date] = elev
                data[region][wavelength]['dates'].append(date)
                centre = (hdul[0].header['CRPIX1'], hdul[0].header['CRPIX2'])  # JCMT's alleged centre, from the header
                hdu = hdul[0]  # a nice compact way to store the data for later.
    
                # data[region][wavelength]['epoch'][date].append(hdu)
                Epoch = hdu.data[0]  # map of the region
                Map_of_Region = interpolate_replace_nans(correlate(Epoch, clip_only=True),
                                                         Gaussian2DKernel(5))
                Map_of_Region_smooth = convolve(Map_of_Region, align_smooth_kernel, normalize_kernel=False)
                Map_of_RegionXC = Map_of_Region - Map_of_Region_smooth
    
                XC = correlate(epoch_1=Map_of_RegionXC, epoch_2=FirstEpochData).real
                PS = correlate(Map_of_Region, psd=True)
                AC = correlate(Map_of_Region).real  # auto correlation of the map
                Vec = np.array([centre[0] - (hdul[0].shape[2] // 2),
                                centre[1] - (hdul[0].shape[1] // 2)])
                JCMT_offset = FirstEpochVec - Vec  # JCMT offset from headers
                data[region][wavelength]['JCMT_offset'][date] = JCMT_offset  # used for accessing data later.
    
                [[AMP, SIGX, SIGY, THETA], [AMP_ERR, SIGX_ERR, SIGY_ERR, THETA_ERR]], _ = gaussian_fit_ac(AC)
                offset, offset_err = gaussian_fit_xc(XC)
                alignment = JCMT_offset - offset
                Length_Scale = np.sqrt(SIGX * SIGY)
    
                data[region][wavelength]['XC']['offset'][date] = offset * scale
                data[region][wavelength]['XC']['offset_err'][date] = offset_err
                data[region][wavelength]['XC']['alignment'][date] = alignment
    
                data[region][wavelength]['AC']['beam'][date] = Length_Scale
                data[region][wavelength]['AC']['amp'][date] = AMP
                data[region][wavelength]['AC']['amp_err'][date] = AMP_ERR
                data[region][wavelength]['AC']['sig_x'][date] = SIGX
                data[region][wavelength]['AC']['sig_x_err'][date] = SIGX_ERR
                data[region][wavelength]['AC']['sig_y'][date] = SIGY
                data[region][wavelength]['AC']['sig_y_err'][date] = SIGY_ERR
                data[region][wavelength]['AC']['theta'][date] = THETA
                data[region][wavelength]['AC']['theta_err'][date] = THETA_ERR
    
                Clipped_Map_of_Region_LENGTH = np.arange(0, Map_of_Region.shape[0])
                loc = list(product(Clipped_Map_of_Region_LENGTH, Clipped_Map_of_Region_LENGTH))
                MidMapX = AC.shape[1] // 2  # middle of the map x
                MidMapY = AC.shape[0] // 2  # and y
                radius, AC_pows = [], []
                for idx in loc:  # Determining the power at a certain radius
                    r = ((idx[0] - MidMapX) ** 2 + (idx[1] - MidMapY) ** 2) ** (1 / 2)
                    AC_pow = AC[idx[0], idx[1]].real
                    radius.append(r)
                    AC_pows.append(AC_pow)
                radius, AC_pows = zip(*sorted(list(zip(radius, AC_pows)), key=op.itemgetter(0)))
                radius = np.array(radius)
                AC_pows = np.array(AC_pows)
    
                num = len(radius[np.where(radius <= DIST)])
                opt_fit_AC, cov_mat_AC = curve_fit(f, radius[1:num], AC_pows[1:num])  # f: the linear model (m*r + b) for this fit, defined elsewhere in the module
                err = np.sqrt(np.diag(cov_mat_AC))
    
                M = opt_fit_AC[0]
                M_err = err[0]
                B = opt_fit_AC[1]
                B_err = err[1]
    
                data[region][wavelength]['linear']['m'][date] = M
                data[region][wavelength]['linear']['m_err'][date] = M_err
                data[region][wavelength]['linear']['b'][date] = B
                data[region][wavelength]['linear']['b_err'][date] = B_err
    
    
    data = default_to_regular(data)
    if not os.path.exists('data'):
        os.system('mkdir data')
    with open('data/data_Transient_run_'+str(alignment_iteration)+'_'+wavelength+'.pickle', 'wb') as OUT:
        pickle.dump(data, OUT)
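
# A minimal sketch of reading the pickled results back (assuming the default
# alignment_iteration=0 and wavelength='850' used above; keys follow the
# structure built in make_data_dict):
#
#   import pickle
#   with open('data/data_Transient_run_0_850.pickle', 'rb') as IN:
#       data = pickle.load(IN)
#   print(data['DR21C']['850']['dates'])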
Example #6
# Step 5
kappa.cmult(in_=output_dir + 'calibrator_scan_37.sdf',
            scalar=FCF8,
            out=output_dir + 'calibrator_scan_37_cal')
'''
Cropping target maps
'''

cropped_map = picard.crop_scuba2_images(
    [output_dir + 'calibrator_scan_37_cal.sdf'],
    recpars=output_dir + 'crop_parameters.lis')
# remove any previous copy of the cropped map from output_dir before moving the new one in
os.system('rm -rf ' + output_dir + cropped_map.datafiles[0])
os.system('mv ' + cropped_map.datafiles[0] + ' ' + output_dir)

# Step 6
convert.ndf2fits(in_=output_dir + 'calibrator_scan_37_cal.sdf',
                 out=output_dir + 'calibrator_scan_37_cal.fits')

# This is for step 7
names_mosaic = glob.glob(output_dir + '*_crop.sdf')
names_mosaic.sort()

file2 = open(output_dir + 'mylist_mosaic.lst', 'w')
for name in names_mosaic:
    file2.write('{0}\n'.format(name))  # writes name in each line
file2.close()

# in step 7 we need to specify the size of the image, so we need to read the header
convert.ndf2fits(in_=names_mosaic[0], out=names_mosaic[0] + '.fits')
hdr = fits.open(names_mosaic[0] + '.fits')[0].header
lbound1 = hdr['LBOUND1']
lbound2 = hdr['LBOUND2']
lbound3 = hdr['LBOUND3']
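
# Step 7 itself would then mirror Example #1: read the NAXIS keys as well and
# feed the pixel bounds to kappa.wcsmosaic. A sketch, assuming the same header
# conventions and a hypothetical output name 'mosaic_map':
naxis1 = int(hdr['NAXIS1'])
naxis2 = int(hdr['NAXIS2'])
naxis3 = int(hdr['NAXIS3'])
kappa.wcsmosaic(in_='^' + output_dir + 'mylist_mosaic.lst',
                out=output_dir + 'mosaic_map',
                lbnd=[lbound1, lbound2, lbound3],
                ubnd=[lbound1 + naxis1, lbound2 + naxis2, lbound3 + naxis3],
                ref=names_mosaic[0])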
Example #7
def create_pol_map(nums,data_dir,output_dir,date,configfile,crop_file,waveband):
    '''Creates calibrated, cropped stokes cubes of a POL-2 scan. The following data reduction
    commands are performed in starlink:
    (1) Checks if in the data_products directory there is an existing folder 
       for each calibrator scan number or 'default' in the case of auto calibration.
       If not, it creates the folder
    (2) Creates a list with the names of raw data by calling create_list function
    (3) Runs smurf.pol2map on the raw polarization data to create a 
    total intensity map in pW. It is run again, with the first I map as input, to produce the
    final I, Q and U maps, and the vector catalogue.
    (4) Runs kappa.cmult to apply default FCFs to the IQU maps (individual scans and mosaics)
    (5) Runs picard.crop_scuba2_images to crop the IQU maps (individual scans and mosaics)
    (6) IQU maps are converted to fits files, and a stokes cube is built (individual scans and mosaics)
    INPUT:  nums - list of strings of target scan numbers
            data_dir - path to directory where raw data is stored (str)
            output_dir - path to directory where the data products are stored (str)
            date - date of observation (YYYYMMDD, str)
            configfile - name of configuration file WITH full path (str)
            crop_file - name of cropping parameter file WITH full path (str)
            waveband - '8' or '4' for 850um or 450um (str)
    OUTPUT: Nothing returned, but function creates,
            - Calibrated, cropped stokes cubes (individual scans and mosaics with and without matched filters; fits files)
    '''
    final_outputdir=output_dir+'/data_products/'
    # default POL-2 FCFs (Jy/beam/pW)
    if waveband=='8':
        fcf_val=725
    elif waveband=='4':
        fcf_val=962
    if not os.path.isdir(output_dir+'calibrate_default'):
        os.mkdir(output_dir+'calibrate_default')
    output_dir2 = output_dir+'calibrate_default/'
    create_list(nums,data_dir,output_dir,'default',waveband)
    smurf.pol2map(in_ = '^'+output_dir2+'mylistall_'+waveband+'.lst', iout = output_dir2+'stokes_i_'+waveband+'/'+'mosaic_Imap',qout='!',\
        uout='!', mapdir = output_dir2+'stokes_i_'+waveband, qudir = output_dir2+'stokes_qu_'+waveband,jy=True, fcf='!',skyloop=False,\
        config = '^'+configfile)
    smurf.pol2map(in_=output_dir2+'stokes_qu_'+waveband+'/*',iout = output_dir2+'stokes_i_'+waveband+'/'+'mosaic_Imap_2',
              qout=output_dir2+'stokes_qu_'+waveband+'/'+'mosaic_Qmap', uout=output_dir2+'stokes_qu_'+waveband+'/'+'mosaic_Umap',
              mapdir=output_dir2+'stokes_i_'+waveband+'/',mask=output_dir2+'stokes_i_'+waveband+'/'+'mosaic_Imap',\
              maskout1=output_dir2+'stokes_i_'+waveband+'/'+'astmask',
              maskout2=output_dir2+'stokes_i_'+waveband+'/'+'pcamask',ipref=output_dir2+'stokes_i_'+waveband+'/'+'mosaic_Imap_2',
              cat=output_dir2+'stokes_qu_'+waveband+'/'+'mycat',debias=True,jy=True, fcf='!',skyloop=False,\
              config =  '^'+configfile)
    #calibrate,crop and convert all output I,Q,U maps (indv scans and mosaics, both with matched filter versions) to fits
    for n in nums:
        list_pol=[glob.glob(output_dir2+'stokes_i_'+waveband+'/'+date+'_000'+n+'*_'+i+'map.sdf')[0] for i in ['I','Q','U']]
        for item in list_pol:
            # note: use split('.sdf')[0] rather than strip('.sdf'), since str.strip
            # removes a set of characters from both ends, not a suffix
            kappa.cmult(in_=item,scalar = fcf_val, out = item.split('.sdf')[0]+'_cal.sdf')
            cropped_map = picard.crop_scuba2_images([item.split('.sdf')[0]+'_cal.sdf'],recpars = crop_file)
            os.system('mv '+cropped_map.datafiles[0]+' '+output_dir2+'stokes_i_'+waveband)
            mf_map=picard.scuba2_matched_filter([item.split('.sdf')[0]+'_cal_crop.sdf'])
            os.system('mv '+[i for i in mf_map.datafiles if '_mf' in i][0]+' '+item.split('.sdf')[0]+'_cal_crop_mf.sdf')
            convert.ndf2fits(in_=item.split('.sdf')[0]+'_cal_crop.sdf',out = item.split('.sdf')[0]+'_cal_crop.fits')
            convert.ndf2fits(in_=item.split('.sdf')[0]+'_cal_crop_mf.sdf',out = item.split('.sdf')[0]+'_cal_crop_mf.fits')
        lpf=[glob.glob(output_dir2+'stokes_i_'+waveband+'/'+date+'_000'+n+'*_'+i+'map_cal_crop.fits')[0] for i in ['I','Q','U']]
        stokes_cube=create_stokes_cubes(lpf,output_dir2,waveband,date,n,'n','n')
        lpf_mf=[glob.glob(output_dir2+'stokes_i_'+waveband+'/'+date+'_000'+n+'*_'+i+'map_cal_crop_mf.fits')[0] for i in ['I','Q','U']]
        stokes_cube_mf=create_stokes_cubes(lpf_mf,output_dir2,waveband,date,n,'n','y')
        #copy all final processed maps to the data_products directory
        os.system('cp -r '+stokes_cube+' '+final_outputdir)
        os.system('cp -r '+stokes_cube_mf+' '+final_outputdir)
    list_pol_mosaic=glob.glob(output_dir2+'stokes_*'+waveband+'/mosaic_*map.sdf')
    for item in list_pol_mosaic:
        kappa.cmult(in_=item,scalar = fcf_val, out = item.split('.sdf')[0]+'_cal.sdf')
        cropped_map = picard.crop_scuba2_images([item.split('.sdf')[0]+'_cal.sdf'],recpars = crop_file)
        os.system('mv '+cropped_map.datafiles[0]+' '+output_dir2+item.split('/')[-2])
        mf_map=picard.scuba2_matched_filter([item.split('.sdf')[0]+'_cal_crop.sdf'])
        os.system('mv '+[i for i in mf_map.datafiles if '_mf' in i][0]+' '+item.split('.sdf')[0]+'_cal_crop_mf.sdf')
        convert.ndf2fits(in_=item.split('.sdf')[0]+'_cal_crop.sdf',out = item.split('.sdf')[0]+'_cal_crop.fits')
        convert.ndf2fits(in_=item.split('.sdf')[0]+'_cal_crop_mf.sdf',out = item.split('.sdf')[0]+'_cal_crop_mf.fits')
    lpfmos=[glob.glob(output_dir2+'stokes_*'+waveband+'/mosaic_'+str(i)+'map_cal_crop.fits')[0] for i in ['I','Q','U']]
    stokes_cube_mosaic=create_stokes_cubes(lpfmos,output_dir2,waveband,date,'','y','n')
    lpfmos_mf=[glob.glob(output_dir2+'stokes_*'+waveband+'/mosaic_'+str(i)+'map_cal_crop_mf.fits')[0] for i in ['I','Q','U']]
    stokes_cube_mosaic_mf=create_stokes_cubes(lpfmos_mf,output_dir2,waveband,date,'','y','y')
    #copy all final processed maps to the data_products directory
    os.system('cp -r '+stokes_cube_mosaic+' '+final_outputdir)
    os.system('cp -r '+stokes_cube_mosaic_mf+' '+final_outputdir)
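
# Usage sketch (hypothetical values; configfile and crop_file with full paths):
#
#   create_pol_map(nums=['32', '33'], data_dir='/data/raw/',
#                  output_dir='/data/reduction/', date='20230101',
#                  configfile='/data/config/dimmconfig_pol2.lis',
#                  crop_file='/data/config/crop.lis', waveband='8')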
Example #8
        data[region][wavelength]['AC']['sig_y'] = defaultdict(list)
        data[region][wavelength]['AC']['sig_y_err'] = defaultdict(list)
        data[region][wavelength]['AC']['theta'] = defaultdict(list)
        data[region][wavelength]['AC']['theta_err'] = defaultdict(list)
        if wavelength == '450':
            index = 0
        else:
            index = 1

        FEN = files[index]
        FilePath = ROOT + region + "/sdf/" + FEN
        OutPath = ROOT + region + "/" + FEN[1:-4] + ".fit"
        if os.path.isfile(OutPath):
            pass
        else:
            convert.ndf2fits(FilePath, OutPath)
        FilePath = OutPath
        FirstEpoch = fits.open(FilePath)  # opening the file in astropy
        FirstEpochData = FirstEpoch[0].data[
            0]  # Numpy data array for the first epoch
        FirstEpochCentre = np.array(
            [FirstEpoch[0].header['CRPIX1'], FirstEpoch[0].header['CRPIX2']])

        # middle of the map of the first epoch
        FED_MidMapX = FirstEpochData.shape[1] // 2
        FED_MidMapY = FirstEpochData.shape[0] // 2
        FirstEpochVec = np.array([
            FirstEpochCentre[0] - FED_MidMapX,
            FirstEpochCentre[1] - FED_MidMapY
        ])
        FirstEpochData = FirstEpochData[FED_MidMapY - length:FED_MidMapY +
                                        length + 1, FED_MidMapX -
                                        length:FED_MidMapX + length + 1]


def TauRelPipeline(source,OR_coeffs,wave,mindate,maxdate,aperture_diam=0.01666666667,physical_thresh=0.05):
    '''
    This pipeline relies on a variety of programs found in the
    `TauRelPrepFunctions.py` and `TauRelAnalysis_20171215.py` packages
    (also `get_noext_reductions_from_kamaka.py`, coded with Graham Bell's help).

    The first has functions that crop images, find files, etc., while the second has functions
    which can read fits headers from the .sdf file format and perform general analysis like running KAPPA's beamfit.

    This code relies on a consistent reduction being performed nightly. Currently, there is a kamaka code that takes
    all the calibrator data and reduces it on 1 arcsecond pixels with NO EXTINCTION CORRECTION; the data is left in pW.
    These files can be found by running get_noext_reductions_from_kamaka.py.

    This is the order of operations for `TauRelPipeline.py`:

    1. Gather the reduced data that has no extinction correction or FCF factor applied
    2. Select, from those, the source you are currently interested in
    3. Construct the Transmission versus PWV function as before
    4. For each pair of physical coefficients (a,b) and each calibrator observation:
    
        a. Get the tau225 from the header by averaging WVMTAUST and WVMTAUEN (these WVM values may be wrong because we did not apply an EXT model to the data! Check with the raw WVM information)
        b. Get the airmass from the header, again by averaging over the short observation
        c. Calculate the transmission and compare it to the expected transmission
        d. Apply the new extinction correction (transmission) using CMULT
        e. Crop the image in order to run GaussClumps
        f. Fit the peak flux with GaussClumps and obtain the size & area of the calibrator
        g. Lay down a 1 arcminute diameter aperture centered on the calibrator with beamfit and measure the total flux

    5. Save all the information for every (a,b) pair (size, location, peak flux, total flux, etc.)
    6. Plot the Peak Flux versus Transmission for every calibrator observation and for every (a,b) extinction correction and fit a linear regression
    7. Plot the Total Flux versus Transmission for every calibrator observation and for every (a,b) extinction correction and fit a linear regression
    8. Find the shallowest slopes = the optimal (a,b)


    ######
    To call:

    TauRelPipeline('CRL618',[(26.0, 0.012),(26.5,0.012)],'450','20161101','25001231')

    source          = STR: name of source from fits header
    OR_coeffs       = LIST: a list of tuples (a,b) representing opacity relation coefficients you would like to test:

                      Im = I0 exp(-tau x airmass)

                      So to correct for the extinction, we apply a version of: exp(-tau x airmass)

                      This requires an opacity relation for the wavelengths we care about.
 
                                       tau_wave = a(tau_225 - b)
 
                      The current data reduction uses: 
   
                                       tau_850 = 4.6(tau_225 - 0.0043) 
                                       tau_450 = 26.0(tau_225 - 0.012)
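
                      e.g. tau_225 = 0.065 gives tau_850 = 4.6(0.065 - 0.0043) ≈ 0.28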
 

    wave            = STR: Wavelength in microns
    mindate         = STR: Minimum date to consider: e.g. '20131231' -- WILL START ONE DAY AFTER
    maxdate         = STR: Maximum date to consider: e.g  '20131231' -- WILL STOP ONE DAY BEFORE
    aperture_diam   = FLOAT: Diameter of aperture to measure total flux
    physical_thresh = FLOAT: The percentage/100.0 that the calculated transmission can differ from the CSO model and still be considered physical

    '''

    print('\n##############\n##############\n##############')
    print('### Beginning ###')
    print('##############\n##############\n##############')
    
    import time
    start_time = time.time()
    import os
    from os import sys
    import subprocess
    import numpy as np
    import matplotlib.pyplot as plt
    import pickle
    import glob
    from starlink import kappa
    
    trans_minus_expectedtrans_thresh = physical_thresh #historical     
    coeffs_from_user = np.array(OR_coeffs)
    
    coeff1s_from_user = []
    coeff2s_from_user = []
    
    for eachcoeffpair in coeffs_from_user:
        coeff1s_from_user.append(eachcoeffpair[0])
        coeff2s_from_user.append(eachcoeffpair[1])
    
    ###############################################
    ###############################################
    ###############################################
    
    
    
    ###########
    ########### Section 1: Preparation
    ###########
    ###################
    # The files we are accessing
    # are uncalibrated and have no extinction
    # correction applied through the makemap
    # procedure - so lets build a list
    # of directories and files
    ###################
    ###########
    ###########
    ###########
    
    os.system('mkdir '+source)
    
    from TauRelPrepFunctions import crop_img
    from TauRelAnalysis_20171215 import getsdfhdr
    
    print('\n##############\n##############\n##############')
    print('### "Gathering list of NOEXT, UNCALIBRATED Files" ###')
    print('##############\n##############\n##############')
    
    getdir_command = "python scripts/get_noext_reductions_from_kamaka.py"
    process = subprocess.Popen(getdir_command.split(), stdout=subprocess.PIPE)
    alldirsbytes, error = process.communicate()
    alldirs = str(alldirsbytes).split('\\n')[0:-1]   
 
    reduced_cal_noext_files=[]
    
    if source+'_'+str(wave)+'_NOEXTfiles_'+str(mindate)+'.txt' not in os.listdir('FileLists/'):
        previous_file_exists = False
        noextfilelist = open('FileLists/'+source+'_'+str(wave)+'_NOEXTfiles_'+str(mindate)+'.txt','w')
        for eachdir in alldirs:
            if eachdir[0]=='b':
                directoryname = eachdir.split("b'")[-1]
            else:
                directoryname = eachdir
            for eachfile in os.listdir(directoryname):
                if eachfile.split('_')[-1]=='reduced.sdf':
                    if eachfile.split('_')[-2]==wave:
                        if np.logical_and(int(eachfile.split('_')[0].split('s')[-1])>=int(mindate),int(eachfile.split('_')[0].split('s')[-1])<int(maxdate)):
                            if kappa.fitsval(directoryname+'/'+eachfile,'OBJECT').value==source:
                                reduced_cal_noext_files.append(directoryname+'/'+eachfile)
                                noextfilelist.write(directoryname+'/'+eachfile+'\n')
        noextfilelist.close()
    else:
        previous_file_exists = True
        dates_of_files_added = []
        scans_of_files_added = []
        for eachfile in open('FileLists/'+source+'_'+str(wave)+'_NOEXTfiles_'+str(mindate)+'.txt','r'):
            print(eachfile,' already done!')
            dates_of_files_added.append(float(eachfile.split('/')[-1].split('_')[0].split('s')[-1]))
            scans_of_files_added.append(float(eachfile.split('/')[-1].split('_')[1]))
        if len(dates_of_files_added)>0:
            date_of_last_file_added = int(max(dates_of_files_added))
            scan_of_last_file_added = int(max(scans_of_files_added))
        else:
            date_of_last_file_added = int(mindate)
            scan_of_last_file_added = -1.0
        noextfilelist = open('FileLists/'+source+'_'+str(wave)+'_NOEXTfiles_'+str(mindate)+'.txt','a')
        for eachdir in alldirs:
            if eachdir[0]=='b':
                directoryname = eachdir.split("b'")[-1]
            else:
                directoryname = eachdir
            for eachfile in os.listdir(directoryname):
                if eachfile.split('_')[-1]=='reduced.sdf':
                    if eachfile.split('_')[-2]==wave:
                        if float(eachfile.split('_')[0].split('s')[-1]) > date_of_last_file_added:
                            if float(eachfile.split('_')[0].split('s')[-1]) < int(maxdate):
                                if kappa.fitsval(directoryname+'/'+eachfile,'OBJECT').value==source:
                                    reduced_cal_noext_files.append(directoryname+'/'+eachfile)
                                    noextfilelist.write(directoryname+'/'+eachfile+'\n')
                        elif float(eachfile.split('_')[0].split('s')[-1]) == date_of_last_file_added:
                            if float(eachfile.split('_')[0].split('s')[-1]) < int(maxdate):
                                if float(eachfile.split('_')[1]) > scan_of_last_file_added:
                                     if kappa.fitsval(directoryname+'/'+eachfile,'OBJECT').value==source:
                                         reduced_cal_noext_files.append(directoryname+'/'+eachfile)
                                         noextfilelist.write(directoryname+'/'+eachfile+'\n')
    
    print('\n##############\n##############\n##############')
    print('### "List of files has been gathered!" ###')
    print('##############\n##############\n##############')
    
    
    ############
    ############ Section 2: Analysis
    ############
    ####################
    # Now, let's define a range of
    # a and b values to step through
    # depending on the wavelength
    # so we can apply a new extinction
    # correction, fit the calibrator
    # and see which slope is the 
    # flattest on a Flux versus Transmission plot.
    ####################
    #############
    #############
    #############
    
    from TauRelAnalysis_20171215 import FitCSOTransvsPWVFast,CSOtrans,fitcal,fitcalBF
    from starlink.ndfpack import Ndf
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    from starlink import convert
    import astropy.io.fits as apfits

    # Construct a functional form of Transmission vs PWV
    
    print('\n##############\n##############\n##############')
    print('### Building Transmission vs PWV function')
    print('##############\n##############\n##############')
    
    TransvsPWV = FitCSOTransvsPWVFast(int(wave))
    
    coeff1_range = coeff1s_from_user
    coeff2_range = coeff2s_from_user
    
    slopes            = []
    intercepts        = []
    coeff1s           = []
    coeff2s           = []
    trans_unphys_perc = []
    
    print('\n##############\n##############\n##############')
    print('### Testing Pairs of Coefficients for the Tau Relation')
    print('##############\n##############\n##############')
    
    results_dict = {}
    if previous_file_exists == True:    
        previous_results_dict = pickle.load(open(sorted(glob.glob('results/TauRelPipeline_FullResults_'+wave+'_'+source+'_'+str(mindate)+'*bin'))[-1],'rb'))
    
    paircount = 0
    
    numfiles = len(reduced_cal_noext_files)
    
    
    for eachpair in range(len(coeff1_range)):
        #for coeff2 in coeff2_range:
    
            paircount = paircount + 1
            print ('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n######\n# Pair '+str(paircount)+' out of '+str(len(coeff1_range))+'\n######\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')
    
            run_num = paircount-1
    
            run_num_str = "Run_"+str(run_num)
    
            # Values to store for each file
            transphysical = [] 
            transmissions = []
            peak_fluxes   = []
            map_peak_fluxes = []
            BFPeak_fluxes = []
            total_fluxes  = []
            areas         = []
            FWHM1s        = []
            FWHM2s        = []
            fnames        = []
            WVMST         = []
            WVMEN         = []
            WVMST_TIME    = []
            WVMEN_TIME    = []
            AMSTART       = []
            AMEND         = []
            obsstart      = []
            obsend        = []
            ATSTART       = []
            ATEND         = []
            delta_trans_list  = [] #abs(trans - expected_trans)
            # New Params
            OBSNUM        = []
            UTDATE        = [] 
            AZSTART       = [] 
            AZEND         = [] 
            ELSTART       = [] 
            ELEND         = [] 
            HUMSTART      = [] 
            HUMEND        = [] 
            BPSTART       = [] 
            BPEND         = [] 
            WNDSPDST      = [] 
            WNDSPDEN      = [] 
            WNDDIRST      = [] 
            WNDDIREN      = [] 
            TAU225ST      = [] 
            TAU225EN      = [] 
            TAUDATST      = [] 
            TAUDATEN      = [] 
            SEEINGST      = [] 
            SEEINGEN      = [] 
            SEEDATST      = [] 
            SEEDATEN      = [] 
            FRLEGTST      = [] 
            FRLEGTEN      = [] 
            BKLEGTST      = [] 
            BKLEGTEN      = [] 
            MJD_OBS       = [] #In FITS HEADER - hyphen
            MJD_END       = [] #IN FITS HEADER - hyphen
            ELAPTIME      = [] 
            
    
            filecount     = 0 
    
            for eachfile in reduced_cal_noext_files:
                    eachfile = eachfile.split('\n')[0]
                    filecount = filecount + 1
                    print ('\n\n File: '+eachfile+' ('+str(filecount)+' out of '+str(numfiles)+')\n\n')
                    # Get the header information to calculate the Transmission 
                    convert.ndf2fits(eachfile,eachfile.split('/')[-1].split('.sdf')[0]+'.fits')
                    hdr = apfits.getheader(eachfile.split('/')[-1].split('.sdf')[0]+'.fits')
                    list_of_header_values = []
                    for eachcard in range(len(hdr.cards)):
                        list_of_header_values.append(hdr.cards[eachcard][0])
                    os.system('rm -f '+eachfile.split('/')[-1].split('.sdf')[0]+'.fits')
                    #hdr = getsdfhdr(eachfile) 
                    
    
                    tau225       = (hdr['WVMTAUST']+hdr['WVMTAUEN'])/2.0
    
                    tau_jcmt     = coeff1_range[eachpair]*(tau225-coeff2_range[eachpair])
    
                    airmass      = (hdr['AMSTART']+hdr['AMEND'])/2.0
    
                    #transmission = np.exp(-1.0*tau_jcmt*airmass)
    
                    # The Tau225 value is the zenith value! So don't modify it by the airmass!!
                    # We are comparing it to the zenith transmission!
    
                    transmission = np.exp(-1.0*tau_jcmt)
    
                    expected_transmission = CSOtrans(int(wave),tau225,TransvsPWV)
    
                    delta_trans = abs(transmission - expected_transmission)
                    delta_trans_list.append(delta_trans)
    
                    if delta_trans > trans_minus_expectedtrans_thresh:
                        
                        transphysical.append('no')
    
                    else:
    
                        transphysical.append('yes')
    
                    # Apply the new extinction correction to the file - YOU NEED THE AIRMASS HERE - THE ORIGINAL EXT CORRECTION INCLUDES THE AIRMASS
                    
                    new_ext_cor    = np.exp(-1.0*tau_jcmt*airmass) 
    
                    extcor_command = '$KAPPA_DIR/cdiv in='+eachfile
                    extcor_command += ' scalar='+str(new_ext_cor)+' out='+source+'/'+eachfile.split('/')[-1].split('.sdf')[0]+'_extcor.sdf'
                    subprocess.call(extcor_command, shell=True)
    
                    #Crop the image!
                    crop_img(source+'/'+eachfile.split('/')[-1].split('.sdf')[0]+'_extcor.sdf','./',CROP_METHOD='CIRCLE',MAP_RADIUS=200)
    
                    # Now measure the peak and extended structure of the calibrator with Gaussclumps, return these values
                    # peak_flux,total_flux, FWHM1, FWHM2 -- need to compare Beamfit to Gaussclumps - benefit of GC: Total and Peak at the same time
                    # so run gaussclumps, grab the brightest source - and output the info - also tells you FWHM_maj and FWHM_min
                    peak_flux,total_fluxbad,FWHM1,FWHM2,area,peakX,peakY = fitcal(eachfile.split('/')[-1].split('.sdf')[0]+'_extcor_crop.sdf',"GCParms/GCParmsm.txt")
                    BFPeak,BFPeakunc,BFTotal,BFmajFWHM,BFmajFWHMunc,BFminFWHM,BFminFWHMunc = fitcalBF(eachfile.split('/')[-1].split('.sdf')[0]+'_extcor_crop.sdf',hdr,source)
                    if not np.isnan(peak_flux): #Sometimes Gaussclumps does not fit a calibrator- testing with beamfit currently 20170920
   
                        if float(peakX.split('h ')[1].split('m ')[1].split('s')[0])>=60:
                            peakX                                                = peakX.split('h ')[0]+':'+str(int(float(peakX.split('h ')[1].split('m ')[0])+1))+':0'+"{:1.2f}".format(float(peakX.split('h ')[1].split('m ')[1].split('s')[0]) % 60)
                        else:
                            peakX                                                = peakX.split('h ')[0]+':'+peakX.split('h ')[1].split('m ')[0]+':'+peakX.split('h ')[1].split('m ')[1].split('s')[0]
                        coord = SkyCoord(peakX,peakY,unit=(u.hourangle, u.deg))
                        ndf = Ndf(eachfile.split('/')[-1].split('.sdf')[0]+'_extcor_crop.sdf')
                        get_pixel_coord = ndf.wcs.tran([[coord.ra.radian], [coord.dec.radian],[float(wave) / 1000000]],False)
                        (xgrid, ygrid, zgrid) = (get_pixel_coord - 1).flatten().round().astype(int)
                        MAPPeak = ndf.data[0, ygrid, xgrid].item()
                        total_flux                                           = kappa.aperadd(eachfile.split('/')[-1].split('.sdf')[0]+'_extcor_crop.sdf',centre='"'+peakX+','+peakY+'"',diam=aperture_diam).total
                        annulus_ARD                                          = open('annulus.ard','w')
                        annulus_ARD.write('COFRAME(SKY,SYSTEM=FK5,EQUINOX=2000)')

##################################
                        # THE FOLLOWING LINE IS INCORRECT - 20180423 - 2 things wrong here, the background should be calculated between 90 and 120 arcsecond diameter apertures. These are 60 and 90 arcseconds and in annulus_ARD, they should be given in readius! Not Diameter! See the fix one line below.
                        #annulus_ARD.write('CIRCLE('+peakX+','+peakY+',0.025) .AND. .NOT. CIRCLE('+peakX+','+peakY+',0.01666666)') 
                        #
                        # THE FOLLOWING LINE IS CORRECT   - fixed at 12:00pm on 20180423 - everything produced since then will have this fix
                        annulus_ARD.write('CIRCLE('+peakX+','+peakY+',0.016666) .AND. .NOT. CIRCLE('+peakX+','+peakY+',0.0125)')
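                        # (for reference: 0.016666 deg = 60" radius = 120" diameter outer edge;
                        #  0.0125 deg = 45" radius = 90" diameter inner edge, matching the
                        #  90-120 arcsecond background annulus described above)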
###################################
                        annulus_ARD.close()
                        kappa.ardmask(eachfile.split('/')[-1].split('.sdf')[0]+'_extcor_crop.sdf','annulus.ard',out='ardmask_bkgnd',INSIDE=False)
                        total_flux_bckgnd                                = kappa.stats('ardmask_bkgnd').mean
                        os.system('rm -f annulus.ard ardmask_bkgnd*')
    
                        peak_fluxes.append(peak_flux)
                        map_peak_fluxes.append(MAPPeak)
                        BFPeak_fluxes.append(BFPeak)
                        total_fluxes.append(total_flux-total_flux_bckgnd)
    
                        areas.append(area)
                        FWHM1s.append(FWHM1)
                        FWHM2s.append(FWHM2)
                        transmissions.append(transmission) # THIS IS THE ZENITH TRANSMISSION - NO AIRMASS
                        fnames.append(source+'/'+eachfile.split('/')[-1].split('.sdf')[0]+'_extcor.sdf')
                        WVMST.append(hdr['WVMTAUST'])
                        WVMEN.append(hdr['WVMTAUEN'])
                        WVMST_TIME.append(hdr['WVMDATST'])
                        WVMEN_TIME.append(hdr['WVMDATEN'])
                        AMSTART.append(hdr['AMSTART'])
                        AMEND.append(hdr['AMEND'])
                        obsstart.append(hdr['DATE-OBS'])
                        obsend.append(hdr['DATE-END'])
                        ATSTART.append(hdr['ATSTART'])
                        ATEND.append(hdr['ATEND'])
                        OBSNUM.append(hdr['OBSNUM']) 
                        UTDATE.append(hdr['UTDATE']) 
                        AZSTART.append(hdr['AZSTART']) 
                        AZEND.append(hdr['AZEND']) 
                        ELSTART.append(hdr['ELSTART']) 
                        ELEND.append(hdr['ELEND']) 
                        HUMSTART.append(hdr['HUMSTART']) 
                        HUMEND.append(hdr['HUMEND']) 
                        BPSTART.append(hdr['BPSTART']) 
                        BPEND.append(hdr['BPEND']) 
                        WNDSPDST.append(hdr['WNDSPDST']) 
                        WNDSPDEN.append(hdr['WNDSPDEN']) 
                        WNDDIRST.append(hdr['WNDDIRST']) 
                        WNDDIREN.append(hdr['WNDDIREN']) 
                        TAU225ST.append(hdr['TAU225ST']) 
                        TAU225EN.append(hdr['TAU225EN']) 
                        TAUDATST.append(hdr['TAUDATST']) 
                        TAUDATEN.append(hdr['TAUDATEN']) 
                        if 'SEEINGST' in list_of_header_values:
                            SEEINGST.append(hdr['SEEINGST'])
                        else:
                            SEEINGST.append(np.nan)
                        if 'SEEINGEN' in list_of_header_values:
                            SEEINGEN.append(hdr['SEEINGEN']) 
                        else:
                            SEEINGEN.append(np.nan)
                        if 'SEEDATST' in list_of_header_values:
                            SEEDATST.append(hdr['SEEDATST']) 
                        else:
                            SEEDATST.append(np.nan)
                        if 'SEEDATEN' in list_of_header_values:
                            SEEDATEN.append(hdr['SEEDATEN']) 
                        else:
                            SEEDATEN.append(np.nan)
                        FRLEGTST.append(hdr['FRLEGTST']) 
                        FRLEGTEN.append(hdr['FRLEGTEN']) 
                        BKLEGTST.append(hdr['BKLEGTST']) 
                        BKLEGTEN.append(hdr['BKLEGTEN']) 
                        MJD_OBS.append(hdr['MJD-OBS']) 
                        MJD_END.append(hdr['MJD-END']) 
                        ELAPTIME.append(hdr['ELAPTIME']) 


                    os.system('rm -f '+eachfile.split('.sdf')[0]+'_extcor_crop.sdf')
                    os.system('rm -f '+source+'/'+eachfile.split('/')[-1].split('.sdf')[0]+'_extcor*.sdf')
    
            if 'no' in transphysical:
                print ('\n For this pairing of coefficients:\n')
                print ('a = '+str(coeff1_range[eachpair]))
                print ('b = '+str(coeff2_range[eachpair]))
                print ('\nTransmission Unphysical for '+str(100*float(len(np.where(np.array(transphysical)=='no')[0]))/float(len(np.array(transphysical))))+'% of measurements \n')
                trans_unphys_perc.append(float(100*len(np.where(np.array(transphysical)=='no')[0]))/float(len(np.array(transphysical))))
    
            else:
                trans_unphys_perc.append(0.0)
    
            coeff1s.append(coeff1_range[eachpair])
            coeff2s.append(coeff2_range[eachpair])
            if len(peak_fluxes)>1:
                m,b = np.polyfit(transmissions,peak_fluxes,1)
                slopes.append(m)
                intercepts.append(b)
            else:
                slopes.append(np.nan)
                intercepts.append(np.nan)

            if previous_file_exists == True:
                previous_results_dict[run_num_str]['fnames'].extend(list(fnames))
                previous_results_dict[run_num_str]['peak_fluxes'].extend(list(peak_fluxes))
                previous_results_dict[run_num_str]['BFPeak'].extend(list(BFPeak_fluxes))
                previous_results_dict[run_num_str]['MapPeak'].extend(list(map_peak_fluxes))
                previous_results_dict[run_num_str]['total_fluxes'].extend(list(total_fluxes))
                previous_results_dict[run_num_str]['areas'].extend(list(areas))
                previous_results_dict[run_num_str]['FWHM1s'].extend(list(FWHM1s))
                previous_results_dict[run_num_str]['FWHM2s'].extend(list(FWHM2s))
                previous_results_dict[run_num_str]['transmissions'].extend(list(transmissions))
                previous_results_dict[run_num_str]['delta_trans'].extend(list(delta_trans_list))
                previous_results_dict[run_num_str]['WVMTAUST'].extend(list(WVMST))
                previous_results_dict[run_num_str]['WVMTAUEN'].extend(list(WVMEN))
                previous_results_dict[run_num_str]['WVMTAUST_TIME'].extend(list(WVMST_TIME))
                previous_results_dict[run_num_str]['WVMTAUEN_TIME'].extend(list(WVMEN_TIME))
                previous_results_dict[run_num_str]['AMSTART'].extend(list(AMSTART))
                previous_results_dict[run_num_str]['AMEND'].extend(list(AMEND))
                previous_results_dict[run_num_str]['OBSSTART'].extend(list(obsstart))
                previous_results_dict[run_num_str]['OBSEND'].extend(list(obsend))
                previous_results_dict[run_num_str]['ATSTART'].extend(list(ATSTART))
                previous_results_dict[run_num_str]['ATEND'].extend(list(ATEND))
                previous_results_dict[run_num_str]['OBSNUM'].extend(list(OBSNUM))
                previous_results_dict[run_num_str]['UTDATE'].extend(list(UTDATE))
                previous_results_dict[run_num_str]['AZSTART'].extend(list(AZSTART))
                previous_results_dict[run_num_str]['AZEND'].extend(list(AZEND))
                previous_results_dict[run_num_str]['ELSTART'].extend(list(ELSTART))
                previous_results_dict[run_num_str]['ELEND'].extend(list(ELEND))
                previous_results_dict[run_num_str]['HUMSTART'].extend(list(HUMSTART))
                previous_results_dict[run_num_str]['HUMEND'].extend(list(HUMEND))
                previous_results_dict[run_num_str]['BPSTART'].extend(list(BPSTART))
                previous_results_dict[run_num_str]['BPEND'].extend(list(BPEND))
                previous_results_dict[run_num_str]['WNDSPDST'].extend(list(WNDSPDST))
                previous_results_dict[run_num_str]['WNDSPDEN'].extend(list(WNDSPDEN))
                previous_results_dict[run_num_str]['WNDDIRST'].extend(list(WNDDIRST))
                previous_results_dict[run_num_str]['WNDDIREN'].extend(list(WNDDIREN))
                previous_results_dict[run_num_str]['TAU225ST'].extend(list(TAU225ST))
                previous_results_dict[run_num_str]['TAU225EN'].extend(list(TAU225EN))
                previous_results_dict[run_num_str]['TAUDATST'].extend(list(TAUDATST))
                previous_results_dict[run_num_str]['TAUDATEN'].extend(list(TAUDATEN))
                previous_results_dict[run_num_str]['SEEINGST'].extend(list(SEEINGST))
                previous_results_dict[run_num_str]['SEEINGEN'].extend(list(SEEINGEN))
                previous_results_dict[run_num_str]['SEEDATST'].extend(list(SEEDATST))
                previous_results_dict[run_num_str]['SEEDATEN'].extend(list(SEEDATEN))
                previous_results_dict[run_num_str]['FRLEGTST'].extend(list(FRLEGTST))
                previous_results_dict[run_num_str]['FRLEGTEN'].extend(list(FRLEGTEN))
                previous_results_dict[run_num_str]['BKLEGTST'].extend(list(BKLEGTST))
                previous_results_dict[run_num_str]['BKLEGTEN'].extend(list(BKLEGTEN))
                previous_results_dict[run_num_str]['MJD-OBS'].extend(list(MJD_OBS))
                previous_results_dict[run_num_str]['MJD-END'].extend(list(MJD_END))
                previous_results_dict[run_num_str]['ELAPTIME'].extend(list(ELAPTIME))

            else:
                results_dict[run_num_str]={}
                results_dict[run_num_str]['fnames']                = fnames
                results_dict[run_num_str]['coeff1']                = coeff1_range[eachpair]
                results_dict[run_num_str]['coeff2']                = coeff2_range[eachpair]
                results_dict[run_num_str]['source']                = source
                results_dict[run_num_str]['peak_fluxes']           = peak_fluxes
                results_dict[run_num_str]['BFPeak']                = BFPeak_fluxes
                results_dict[run_num_str]['MapPeak']               = map_peak_fluxes
                results_dict[run_num_str]['total_fluxes']          = total_fluxes
                results_dict[run_num_str]['areas']                 = areas
                results_dict[run_num_str]['FWHM1s']                = FWHM1s
                results_dict[run_num_str]['FWHM2s']                = FWHM2s
                results_dict[run_num_str]['averageFWHM1']          = np.average(FWHM1s)
                results_dict[run_num_str]['averageFWHM2']          = np.average(FWHM2s)
                results_dict[run_num_str]['transmissions']         = transmissions
                results_dict[run_num_str]['delta_trans']           = delta_trans_list
                results_dict[run_num_str]['WVMTAUST']              = WVMST
                results_dict[run_num_str]['WVMTAUEN']              = WVMEN
                results_dict[run_num_str]['WVMTAUST_TIME']         = WVMST_TIME
                results_dict[run_num_str]['WVMTAUEN_TIME']         = WVMEN_TIME
                results_dict[run_num_str]['AMSTART']               = AMSTART
                results_dict[run_num_str]['AMEND']                 = AMEND
                results_dict[run_num_str]['OBSSTART']              = obsstart
                results_dict[run_num_str]['OBSEND']                = obsend
                results_dict[run_num_str]['ATSTART']               = ATSTART
                results_dict[run_num_str]['ATEND']                 = ATEND
                results_dict[run_num_str]['OBSNUM']                = OBSNUM
                results_dict[run_num_str]['UTDATE']                = UTDATE
                results_dict[run_num_str]['AZSTART']               = AZSTART
                results_dict[run_num_str]['AZEND']                 = AZEND
                results_dict[run_num_str]['ELSTART']               = ELSTART
                results_dict[run_num_str]['ELEND']                 = ELEND
                results_dict[run_num_str]['HUMSTART']              = HUMSTART
                results_dict[run_num_str]['HUMEND']                = HUMEND
                results_dict[run_num_str]['BPSTART']               = BPSTART
                results_dict[run_num_str]['BPEND']                 = BPEND
                results_dict[run_num_str]['WNDSPDST']              = WNDSPDST
                results_dict[run_num_str]['WNDSPDEN']              = WNDSPDEN
                results_dict[run_num_str]['WNDDIRST']              = WNDDIRST
                results_dict[run_num_str]['WNDDIREN']              = WNDDIREN
                results_dict[run_num_str]['TAU225ST']              = TAU225ST
                results_dict[run_num_str]['TAU225EN']              = TAU225EN
                results_dict[run_num_str]['TAUDATST']              = TAUDATST
                results_dict[run_num_str]['TAUDATEN']              = TAUDATEN
                results_dict[run_num_str]['SEEINGST']              = SEEINGST
                results_dict[run_num_str]['SEEINGEN']              = SEEINGEN
                results_dict[run_num_str]['SEEDATST']              = SEEDATST
                results_dict[run_num_str]['SEEDATEN']              = SEEDATEN
                results_dict[run_num_str]['FRLEGTST']              = FRLEGTST
                results_dict[run_num_str]['FRLEGTEN']              = FRLEGTEN
                results_dict[run_num_str]['BKLEGTST']              = BKLEGTST
                results_dict[run_num_str]['BKLEGTEN']              = BKLEGTEN
                results_dict[run_num_str]['MJD-OBS']               = MJD_OBS
                results_dict[run_num_str]['MJD-END']               = MJD_END
                results_dict[run_num_str]['ELAPTIME']              = ELAPTIME

                #if 'no' in transphysical:
                #    results_dict[run_num_str]['trans_unphys_per'] = float(100*len(np.where(np.array(transphysical)=='no')[0]))/float(len(np.array(transphysical)))
                #else:
                #    results_dict[run_num_str]['trans_unphys_per'] = 0.0 
    
    
    #plt.clf()
    #plt.hist(delta_trans_list)
    #plt.savefig('deltatrans_hist_'+str(time.localtime().tm_year)+str(time.localtime().tm_mon)+str(time.localtime().tm_mday)+str(time.localtime().tm_hour)+str(time.localtime().tm_min)+'.pdf',format='pdf')
    #plt.clf()
    
    coeff1s           = np.array(coeff1s)
    coeff2s           = np.array(coeff2s)
    slopes            = np.array(slopes)
    trans_unphys_perc = np.array(trans_unphys_perc)
    
    #plt.hist(trans_unphys_perc)
    #plt.savefig('trans_unphys_perc_'+str(time.localtime().tm_year)+str(time.localtime().tm_mon)+str(time.localtime().tm_mday)+str(time.localtime().tm_hour)+str(time.localtime().tm_min)+'.pdf',format='pdf')
    #plt.clf()
    
    #plt.scatter(np.arange(0,len(coeff1s),1),trans_unphys_perc)
    #plt.savefig('trans_unphys_perc_scatter_'+str(time.localtime().tm_year)+str(time.localtime().tm_mon)+str(time.localtime().tm_mday)+str(time.localtime().tm_hour)+str(time.localtime().tm_min)+'.pdf',format='pdf')
    #plt.clf()
    
    # Save the results dictionary under a zero-padded YYYYMMDDHHMM timestamp
    now = time.localtime()
    YearForFilename  = str(now.tm_year)
    MonthForFilename = '{:02d}'.format(now.tm_mon)
    DayForFilename   = '{:02d}'.format(now.tm_mday)
    HourForFilename  = '{:02d}'.format(now.tm_hour)
    MinForFilename   = '{:02d}'.format(now.tm_min)

    results_filename = ('TauRelPipeline_FullResults_'+wave+'_'+source+'_'+mindate+'_'+
                        YearForFilename+MonthForFilename+DayForFilename+HourForFilename+MinForFilename+'.bin')
    # Use a context manager so the pickle file is properly closed
    if previous_file_exists == True:
        with open(results_filename,'wb') as f:
            pickle.dump(previous_results_dict,f)
    else:
        with open(results_filename,'wb') as f:
            pickle.dump(results_dict,f)

    if not os.path.exists('results'): os.system('mkdir results')
    if not os.path.exists('Figures'): os.system('mkdir Figures')
    # Clean up intermediates and move the products into place
    os.system('rm -f *crop*')
    os.system('mv *FullResults*bin results/')
    os.system('rm -f disp.dat log.group rules.badobs s201*sdf')
    os.system('mv *pdf Figures/')
    
    
    #print('\n##############\n##############\n##############')
    #print('### Finding the best Tau Relation!')
    #print('##############\n##############\n##############')
    
    
    # Now find the best coefficient pair: the flattest slope of peak flux
    # versus transmission, rejecting any pair where more than 40% of the
    # measurements gave unphysical transmissions
    unphysical = 1
    while unphysical == 1:
        best_ind              = np.argmin(abs(slopes))
        percentage_unphysical = trans_unphys_perc[best_ind]
        if percentage_unphysical>40:
            coeff1s           = np.delete(coeff1s,best_ind)
            coeff2s           = np.delete(coeff2s,best_ind)
            slopes            = np.delete(slopes,best_ind)
            trans_unphys_perc = np.delete(trans_unphys_perc,best_ind)
        else:
            unphysical = 0
    
    #print ('\n\nBest Coefficients: \n\n')
    #print ('a = '+str(coeff1s[best_ind]))
    #print ('b = '+str(coeff2s[best_ind]))
    #print ('\ntau_'+wave+' = '+str(round(coeff1s[best_ind],2))+' x (tau_225 - '+str(round(coeff2s[best_ind],5))+')\n\n')
    #print ('Percentage of points that have unphysical transmissions: '+str(trans_unphys_perc[best_ind])+'%\n\n')
    
    # Write out the results
    #results_file = open("TauRelPipeline_Results_"+str(time.localtime().tm_year)+str(time.localtime().tm_mon)+str(time.localtime().tm_mday)+str(time.localtime().tm_hour)+str(time.localtime().tm_min)+".txt","w")
    #results_file.write('\n\nBest Coefficients: \n\n')
    #results_file.write('a = '+str(coeff1s[best_ind])+'\n')
    #results_file.write('b = '+str(coeff2s[best_ind]))
    #results_file.write('\ntau_'+wave+' = '+str(round(coeff1s[best_ind],2))+' x (tau_225 - '+str(round(coeff2s[best_ind],5))+')\n\n')
    #results_file.write('Slope and intercept of Peak Flux versus Transmission plot: \n')
    #results_file.write('Percentage of points that have unphysical transmissions: '+str(trans_unphys_perc[best_ind])+'%\n\n')
    #results_file.write('\n\n'+str(((time.time() - start_time)/60.0))+' minutes to complete.\n\n')
    #results_file.close()
    
    os.system('mv *Results*txt results/')
    os.system('rm -f '+source+'/*')
    os.system('rmdir '+source+'/')

    print ('\n\n'+str(((time.time() - start_time)/60.0))+' minutes to complete.\n\n')
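
# The best coefficients are only reported via the (commented-out) print block
# above; the per-run measurements live in the pickled results file. Below is a
# minimal offline sketch for recovering the flattest-slope pair. It assumes the
# per-run keys populated above ('coeff1', 'coeff2', 'transmissions',
# 'peak_fluxes'), uses a hypothetical filename, and omits the >40%
# unphysical-transmission cut applied in the pipeline.
import pickle
import numpy as np

# Hypothetical filename; real names embed wave, source, mindate and a timestamp
with open('results/TauRelPipeline_FullResults_850_CRL2688_20170101_201801011200.bin','rb') as f:
    results = pickle.load(f)

best_run, best_slope = None, np.inf
for run_num_str, run in results.items():
    if len(run['peak_fluxes']) > 1:
        # Refit peak flux versus transmission; the flattest slope wins
        m, b = np.polyfit(run['transmissions'], run['peak_fluxes'], 1)
        if abs(m) < abs(best_slope):
            best_run, best_slope = run_num_str, m

if best_run is not None:
    print('a = '+str(results[best_run]['coeff1']))
    print('b = '+str(results[best_run]['coeff2']))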
Example #10
print('Uranus primary beam:')
print('    Primary  FWHM: {:.3f}"'.format(
    (beamfit_uranus.majfwhm[0] * u.radian).to(u.arcsec).value))
print('    Secon.   FWHM: {:.3f}"'.format(
    (beamfit_uranus.majfwhm[2] * u.radian).to(u.arcsec).value))
print('    Pri Amp (rel): {:.3f}'.format(primaryamp /
                                         (primaryamp + secondaryamp)))
print('    Sec Amp (rel): {:.3f}'.format(secondaryamp /
                                         (primaryamp + secondaryamp)))
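
# For reference, the printed numbers describe a two-component Gaussian fit to
# the Uranus beam. A minimal sketch of evaluating such a profile follows; the
# helper below is illustrative only, not part of this script.
import numpy as np

def two_gaussian_beam(r, fwhm_pri, fwhm_sec, amp_pri, amp_sec):
    # FWHM = 2*sqrt(2*ln2)*sigma for a Gaussian
    sig_pri = fwhm_pri / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    sig_sec = fwhm_sec / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    return (amp_pri * np.exp(-r**2 / (2.0 * sig_pri**2)) +
            amp_sec * np.exp(-r**2 / (2.0 * sig_sec**2)))

# e.g. beam response 10 arcsec from centre, given fitted FWHMs and amplitudes:
# two_gaussian_beam(10.0, fwhm_pri, fwhm_sec, amp_pri, amp_sec)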

# Plot of the Uranus beam map.
# Create fits file
from starlink import convert
if os.path.isfile('uranus_deep.fits'):
    os.remove('uranus_deep.fits')
convert.ndf2fits(uranusdeep, 'uranus_deep.fits')
if os.path.isfile('uranus_beamfit_residuals.fits'):
    os.remove('uranus_beamfit_residuals.fits')
convert.ndf2fits('uranus_beamfit_residuals.sdf',
                 'uranus_beamfit_residuals.fits')

# The converted NDF is a 3-D cube with a degenerate third axis; keep the 2-D image plane
hdu_deep = fits.open('uranus_deep.fits')[0]
datauranus = hdu_deep.data[0, :, :]

fig = plt.figure(figsize=(6.5, 2.5))
ax = fig.add_subplot(121)
smax = 2.32894e-05
smin = -4.86899e-07
norm = ImageNormalize(vmin=smin,
                      vmax=smax,
                      stretch=astropy.visualization.SqrtStretch())