Example #1
def perform_optimization(location, qValue=0.75, qFloor=0.70, qInitial=0.90, use_config_file=True):
    if use_config_file:
        qValue = float(initialize.get_config_value('qValue'))
        qFloor = float(initialize.get_config_value('qFloor'))
        qInitial = float(initialize.get_config_value('qInitial'))
    print("-> Searching for bad subtractions...")
    residuals = glob.glob(location + '/residuals/*_residual_.fits')
    log_loc = location + '/residuals/log.txt'
    for r in residuals:
        hdu = fits.open(r, mode='update')
        hdr = hdu[0].header
        if hdr['OPTIMIZE'] == 'N':
            startTime = time.time()
            res_name = r.split('/')[-1]
            location = r.replace('/residuals/' + res_name,'')
            image_name = res_name[:-14]
            image = location + '/data/' + image_name + '.fits'
            Q = get_qValue(image, opt=False)
            if Q <= qInitial or np.isnan(Q):
                q_param = optimum_config(r, qValue, qFloor, bkg_match=False, sat=True)
                if q_param < qFloor or np.isnan(q_param):
                    q_param_2 = optimum_config(r, qValue, qFloor, bkg_match=True, sat=True)
                    if q_param_2 < qFloor or np.isnan(q_param_2):
                        log = open(log_loc, 'a+')
                        if q_param_2 > q_param:
                            q_param = q_param_2
                        endTime = time.time()
                        log.write("%s.fits | no optimal configuration found | Q=%.2f | runtime=%.2fs | residual set to zero\n" % (image_name, q_param, (endTime-startTime)))
                        log.close()
            else:
                hdr.set('OPTIMIZE', 'Y')
                log = open(log_loc, 'a+')
                log.write("%s.fits | deg_spatial=2, nstamps=18, half_mesh=5, half_stamp=10, sig2=2, sig3=4 | Q=%.2f | no optimization needed\n" % (image_name, Q))
                log.close()
        hdu.close()
Example #2
def spread_model_filter(location,
                        spread_model_min=-0.025,
                        spread_model_max=0.1,
                        MR=False,
                        use_config_file=True):
    # filters the source file by spread_model and puts the results in filtered_sources.txt
    source_loc = location + '/sources'
    source_txt_loc = source_loc + '/sources.txt'
    source_txt_filtered_loc = source_loc + '/filtered_sources.txt'
    if MR:
        source_txt_loc = source_loc + '/MR_sources.txt'
        source_txt_filtered_loc = source_loc + '/MR_sources_filtered.txt'
    del_lin = []
    with open(source_txt_loc, 'r') as src:
        lines = src.readlines()
    if use_config_file:
        spread_model_min = float(initialize.get_config_value('spread_model_min'))
        spread_model_max = float(initialize.get_config_value('spread_model_max'))
    for lin in lines:
        parse = lin.split()
        if parse != []:
            try:
                # only data rows begin with an integer index; anything else raises ValueError
                int(parse[0])
                if float(parse[-1]) < spread_model_min or float(
                        parse[-1]) > spread_model_max:
                    del_lin.append(lin)
            except (ValueError, IndexError):
                pass
    lines = [a for a in lines if a not in del_lin]
    with open(source_txt_filtered_loc, 'w+') as fil_src:
        fil_src.writelines(lines)
Example #3
def fit(image, stamp_num=3, stamp_size=(300,300), clipped_sig=3, Qthresh=0.25, 
        corr_thresh=0.50, s_x=0, s_y=0, s_width=30, stdThresh=1000, real_psf=True,
        real_x=[], real_y=[], real_stamp=25, optimize=True, distribution='skellam',
        method='curvefit', simulate=False, corr_check=True, use_config_file=True):
    if use_config_file:
        location = image.split('/')[:-2]
        location = '/'.join(location)
        stamp_num = initialize.get_config_value('stamp_num', file_loc=location+'/configs')
        clipped_sig = initialize.get_config_value('clipped_sig', file_loc=location+'/configs')
        Qthresh = initialize.get_config_value('qThresh', file_loc=location+'/configs')
        method = initialize.get_config_value('fit_method', file_loc=location+'/configs')
        corr_thresh = initialize.get_config_value('corr_thresh', file_loc=location+'/configs')
        corr_check = initialize.get_config_value('corr_check', file_loc=location+'/configs')
        stdThresh = initialize.get_config_value('stdThresh', file_loc=location+'/configs')
        real_psf = initialize.get_config_value('real_psf', file_loc=location+'/configs')
        real_stamp = initialize.get_config_value('real_stamp', file_loc=location+'/configs')
    if corr_check:
        biweight_corr_0 = corr(image, opt=optimize, dim=0)
        biweight_corr_1 = corr(image, opt=optimize, dim=1)
        biweight_corr = np.max((biweight_corr_0, biweight_corr_1))
        if biweight_corr >= corr_thresh:
            return 0
    res_stamps, image_median, template_median, X2 = get_res_data(image, numStamps=stamp_num, 
                                                             stampSize=stamp_size, 
                                                             clipped_sigma=clipped_sig,
                                                             sim_x=s_x, sim_y=s_y, sim_size=s_width,
                                                             sim=simulate, opt=optimize, real=real_psf,
                                                             real_X=real_x, real_Y=real_y,
                                                             real_stamp_size=real_stamp)
    if method == 'chi2':
        Qs = []
        Qs_norm = []
        qFloor = float(initialize.get_config_value('qFloor'))
        for i in range(len(res_stamps)):
            Qs.append(fit_chi2(res_stamps[i], image_median[0], template_median[0], std_thresh=stdThresh))
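        # if any stamp scores below Qthresh, report the worst stamp (min);
        # otherwise report the best (max)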
        if np.min(Qs) < Qthresh:
            Q = np.min(Qs)
        else:
            Q = np.max(Qs)
        if Q < qFloor:
            for i in range(len(res_stamps)):
                Qs_norm.append(fit_chi2(res_stamps[i], image_median[0], template_median[0], std_thresh=stdThresh, normalize=False))
            if np.min(Qs_norm) < Qthresh:
                Q_norm = np.min(Qs_norm)
            else:
                Q_norm = np.max(Qs_norm)
            if Q_norm > Q:
                Q = Q_norm
        return np.round(Q, decimals=2)
    elif method == 'curvefit':
        Q = fit_curvefit(res_stamps, image_median, template_median, X2, dist=distribution, qThresh=Qthresh, std_thresh=stdThresh)
        return np.round(Q, decimals=2)
Example #4
def mask_sources_image(res_image, aperture_diam=1.5, use_config_file=True):
    if use_config_file:
        aperture_diam = initialize.get_config_value('aperture_diam')
    res_data = fits.getdata(res_image)
    res_mask = fits.getdata(res_image, 1)
    weight_check = False
    if fits.getval(res_image, 'WEIGHT') == 'Y':
        weight_check = True
        # invert the weight map (1 = good pixel) into mask convention (1 = masked)
        res_mask = (res_mask - 1) * -1
    image = res_image.replace('_residual', '')
    image = image.replace('residuals', 'data')
    im_fwhm = psf.fwhm(image)
    unfiltered_sources, unfiltered_inds = get_sources(image, filtered=False)
    filtered_sources, filtered_inds = get_sources(image, filtered=True)
    for unf in unfiltered_sources:
        if unf not in filtered_sources:
            new_mask = mask_source(res_data.shape[0], res_data.shape[1],
                                   (unf[1], unf[2]), aperture_diam * im_fwhm)
            res_mask = np.logical_or(res_mask, new_mask)
    data_hdu = fits.PrimaryHDU(res_data, header=fits.getheader(res_image))
    if weight_check:
        mask_hdu = fits.ImageHDU((res_mask - 1) * -1)
    else:
        mask_hdu = fits.ImageHDU(res_mask)
    list_hdu = fits.HDUList([data_hdu, mask_hdu])
    list_hdu.writeto(res_image, overwrite=True)
Example #5
def reoccuring(location, pix_dist=1.5, use_config_file=True):
    if use_config_file:
        pix_dist = initialize.get_config_value('pix_dist')
    sources, indices = get_all_sources(location)
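    # detections that recur at (nearly) the same position in at least half of
    # the frames are collected and removed from the filtered source lists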
    for i in range(len(sources)):
        new_sources, new_indices = get_all_sources(location)
        del_inds = []
        for j in range(len(new_sources[i])):
            inds = []
            x_low = round(new_sources[i][j][1]) - pix_dist
            x_high = round(new_sources[i][j][1]) + pix_dist
            y_low = round(new_sources[i][j][2]) - pix_dist
            y_high = round(new_sources[i][j][2]) + pix_dist
            check = 0
            for h in range(len(new_sources)):
                if h != i:
                    for k in range(len(new_sources[h])):
                        x = new_sources[h][k][1]
                        y = new_sources[h][k][2]
                        if x_low < x < x_high and y_low < y < y_high:
                            if check == 0:
                                inds.append(new_indices[i][j])
                            inds.append(new_indices[h][k])
                            check += 1
            if len(inds) >= (len(sources) / 2):
                for index in inds:
                    del_inds.append(index)
        update_filtered_sources(location, del_inds)
Example #6
def MR(path, method='swarp', sig_thresh=4, gauss_sig=3, gauss_filt=False, use_config_file=True):
    '''Stacks residual frames into a *master residual*. Extremely useful for identifying faint variables and quick object detection, but should be used with caution. See documentation for details.
    
    :param str path: Path of data file tree (contains the **configs**, **data**, **psf**, **residuals**, **sources**, **templates** directories). Use a comma-separated list for mapping to multiple datasets.
    :param str method: Stacking method. 
    
        * *swarp* (default): Uses ``SWarp`` (Bertin) to stack the residuals according to the weighted average of the pixels.
        * *sos*: Sum of squares, pixel-wise.
        * *sos_abs*: Absolute sum of squares, pixel-wise. Preserves sign. Mathematically, this looks like :math:`\sum_i p_i \cdot |p_i|`, with :math:`p_i` being the :math:`i`-th pixel. For example, a series of pixels [10, 2, -3, -6] would be stacked as 100 + 4 - 9 - 36.
        * *sigma_clip*: Takes the median of each pixel, unless there exists a pixel above or below a certain number of sigmas, in which case this outlying pixel is taken to be the stacked value.
    
    
    :param float sig_thresh: Only used for *sigma_clip* method. Number of sigmas pixel must exceed to be used as stacked value.
    :param float gauss_sig: Only used for *sigma_clip* method. Number of sigmas used for gaussian filter.
    :param bool gauss_filt: Only used for *sigma_clip* method. When ``True`` the final master residual will be smoothed with a gaussian filter with a sigma equal to *gauss_sig*.
    :param bool use_config_file: If ``True`` all input parameters are fetched from the local *OASIS.config* file.
    :returns: A stacked master residual frame, located in the **residuals** directory with the name *MR.fits*.
    
    '''
    paths = (path.replace(' ','')).split(',')
    del path
    for path in paths:
        if use_config_file:
            method = initialize.get_config_value('MR_method')
        if method in ('sos', 'sos_abs', 'sigma_clip'):
            MR_other(path, mode=method, sigma_thresh=sig_thresh, gauss_sigma=gauss_sig, gauss_filter=gauss_filt)
        elif method == 'swarp':
            MR_swarp(path)
        else:
            print("\n-> Error: Unrecognized method\n-> Please enter either 'swarp', 'sos', 'sos_abs', or 'sigma_clip'\n-> Exiting...")
Example #7
def check_saturate(location, max_sat_pix=10, use_config_file=True):
    print("\n-> Checking images for saturation not found by masking...")
    Max = []
    im = []
    m = []
    y = 0
    images = glob.glob(location + "/*_N_.fits")
    if use_config_file:
        max_sat_pix = get_config_value('max_sat_pix')
    if images:
        for i in images:
            hdu = fits.open(i)
            satur = hdu[0].header['SATURATE']
            lin = hdu[0].header['MAXLIN']
            data = hdu[0].data
            try:
                MSK = hdu[1].data
            except Exception:
                hdu.close()
                name = i.split('/')[-1]
                print('-> Error: Mask corrupted for %s, attempting to mask again...'
                      % (name))
                hdu = fits.open(i, mode='update')
                (hdu[0].header).set('MASKED', 'N')
                hdu.close()
                mask.maskImages(location[:-5])
                try:
                    hdu = fits.open(i)
                    MSK = hdu[1].data
                except Exception:
                    hdu.close()
                    print('-> Error: Could not generate mask, moving %s to OASIS data archive'
                          % (name))
                    os.system('mv %s %s/OASIS/archive/data' % (i, loc))
                    continue
            data = np.ma.array(data, mask=MSK)
            if satur > lin:
                lin = satur
            sat = (data > lin).sum()
            if sat > max_sat_pix:
                y += 1
                im.append(i)
                m.append(np.max(data))
            Max.append(np.max(data))
            hdu.close()
        if y > 0:
            print("\n-> %d/%d saturated images" % (y, len(images)))
            print("\n-> average saturation level (ADU) = %d" % (np.mean(m)-lin))
            return im
        if y == 0:
            diff = lin - np.max(Max)
            print("\n-> no saturated images found")
            print("\n-> closest value to saturation = %d" % (np.max(Max)))
            print("\n-> difference between this value and saturation level = %d\n" % (diff))
            return y
    else:
        print("-> Images have already been checked for saturation")
        return 0
Example #8
def get_SNR(location, reject_sigma=1000, reject_SNR=80, use_config_file=True):
    print('-> Checking image quality...')
    #estimate average SNR for each image
    images = glob.glob(location + '/*.fits')
    SNRs = []
    bad_images = []
    if use_config_file:
        reject_SNR = get_config_value('reject_SNR',
                                      file_loc=location[:-5] + '/configs')
        reject_sigma = get_config_value('reject_sigma',
                                        file_loc=location[:-5] + '/configs')
    for i in images:
        hdu = fits.open(i)
        data = hdu[0].data
        data = data.byteswap().newbyteorder()
        bkg = np.median(data)
        try:
            try:
                objects = sep.extract(data, reject_sigma)
            except Exception:
                # extraction failed (e.g. threshold too low for a noisy frame); retry higher
                try:
                    objects = sep.extract(data, reject_sigma * 10)
                except Exception:
                    objects = sep.extract(data, reject_sigma * 100)
            avgSNR = float(np.average(objects['peak'] / bkg))
            SNRs.append((avgSNR, i))
            if avgSNR < reject_SNR:
                bad_images.append(i)
        except Exception:
            bad_images.append(i)
            SNRs.append((0, i))
        hdu.close()
    # move images that could not be SExtracted to OASIS/archive/bad_images
    print("\n-> Moving bad images to OASIS/archive/bad_images...")
    if bad_images:
        os.system("mkdir --parents %s/OASIS/archive/bad_images" % (loc))
    for im in bad_images:
        os.system("mv %s %s/OASIS/archive/bad_images" % (im, loc))
    print("\n-> Moved %d bad image(s) to OASIS archive" % (len(bad_images)))
    # designate the image with the highest SNR as the reference image
    ref_image = max(SNRs, key=itemgetter(0))[1]
    return ref_image
Example #9
def int_match_to_ref(location, nx=100, ny=100, use_config_file=True):
    print("\n-> Matching flux scales to reference image...")
    ref_im = glob.glob(location + '/data/*_ref_A_.fits')
    images = glob.glob(location + '/data/*_A_.fits')
    initialize.create_configs(location)
    linmatch_loc = location + '/configs/linmatch.txt'
    if not os.path.exists(linmatch_loc):
        os.system('touch %s' % (linmatch_loc))
    if use_config_file:
        nx = initialize.get_config_value('int_match_nx',
                                         file_loc=location + '/configs')
        ny = initialize.get_config_value('int_match_ny',
                                         file_loc=location + '/configs')
    for i in images:
        if i != ref_im[0]:
            try:
                fits.getval(i, 'SCALED')
            except KeyError:
                fits.setval(i, 'SCALED', value='N')
            if fits.getval(i, 'SCALED') == 'N':
                pyraf.iraf.linmatch(i + '[0]',
                                    ref_im[0] + '[0]',
                                    'grid %dx %dy' % (nx, ny),
                                    linmatch_loc,
                                    output=i + 'TEMP')
                temp_image = glob.glob(location + '/data/*TEMP.fits')
                temp_image_hdu = fits.open(temp_image[0])
                temp_image_data = temp_image_hdu[0].data
                image_hdu = fits.open(i, mode='update')
                image_hdr = image_hdu[0].header
                image_hdr.set('SCALED', 'Y')
                image_mask = image_hdu[1].data
                temp_image_hdu.close()
                image_hdu.close()
                image_hdr = fits.getheader(i)
                hduData = fits.PrimaryHDU(temp_image_data, header=image_hdr)
                hduMask = fits.ImageHDU(image_mask)
                hduList = fits.HDUList([hduData, hduMask])
                hduList.writeto(i, overwrite=True)
                os.system("rm %s" % (temp_image[0]))
Example #10
def fit(image,
        stamp_num=3,
        stamp_size=(300, 300),
        clipped_sig=3,
        Qthresh=0.25,
        corr_thresh=0.50,
        optimize=True,
        distribution='skellam',
        method='chi2',
        use_config_file=True):
    if use_config_file:
        stamp_num = initialize.get_config_value('stamp_num')
        clipped_sig = initialize.get_config_value('clipped_sig')
        Qthresh = initialize.get_config_value('qThresh')
        method = initialize.get_config_value('fit_method')
        corr_thresh = initialize.get_config_value('corr_thresh')
    biweight_corr = corr(image, opt=optimize)
    if biweight_corr >= corr_thresh:
        return 0
    else:
        res_stamps, image_median, template_median = get_res_data(
            image,
            numStamps=stamp_num,
            stampSize=stamp_size,
            clipped_sigma=clipped_sig,
            opt=optimize)
        if method == 'chi2':
            Qs = []
            Qs_norm = []
            qFloor = float(initialize.get_config_value('qFloor'))
            for i in range(len(res_stamps)):
                Qs.append(
                    fit_chi2(res_stamps[i], image_median[0],
                             template_median[0]))
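            # if any stamp scores below Qthresh, report the worst stamp (min);
            # otherwise report the best (max)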
            if np.min(Qs) < Qthresh:
                Q = np.min(Qs)
            else:
                Q = np.max(Qs)
            if Q < qFloor:
                for i in range(len(res_stamps)):
                    Qs_norm.append(
                        fit_chi2(res_stamps[i],
                                 image_median[0],
                                 template_median[0],
                                 normalize=False))
                if np.min(Qs_norm) < Qthresh:
                    Q_norm = np.min(Qs_norm)
                else:
                    Q_norm = np.max(Qs_norm)
                if Q_norm > Q:
                    Q = Q_norm
            return Q
        elif method == 'curvefit':
            Q = fit_curvefit(res_stamps,
                             image_median,
                             template_median,
                             dist=distribution,
                             qThresh=Qthresh)
            return Q
Example #11
def write_total_sources(location):
    Q_min = float(initialize.get_config_value('qFloor'))
    print("\n-> Calculating detection statistics...\n")
    uniqueSources, numFilteredSources = source_count(location)
    originalSources, numSources = source_count(location, filtered=False)
    MR_sources, MR_inds = get_sources("%s/residuals/MR.fits" % (location),
                                      MR=True,
                                      filtered=False)
    MR_sources_filt, MR_inds_filt = get_sources("%s/residuals/MR.fits" %
                                                (location),
                                                MR=True,
                                                filtered=True)
    MR_sources = len(MR_sources)
    MR_sources_filt = len(MR_sources_filt)
    total_source_loc = location + '/sources/total_sources.txt'
    residuals = glob.glob(location + '/residuals/*_residual_.fits')
    bad_subtractions = 0
    date = datetime.datetime.now()
    for r in residuals:
        if (fits.getdata(r, 1) == 0).all():
            bad_subtractions += 1
    with open(total_source_loc, 'a+') as total:
        total.write(
            'Date Run: %d/%d/%d %d:%d:%d | Number of Images Subtracted = %d\n'
            % (date.month, date.day, date.year, date.hour, date.minute,
               date.second, len(residuals)))
        total.write('Total Initial Sources: %d\n' % (numSources))
        print('\nTotal Initial Sources: %d\n' % (numSources))
        total.write('Total Filtered Sources: %d\n' % (numFilteredSources))
        print('Total Filtered Sources: %d\n' % (numFilteredSources))
        total.write('Total Unique Detections: %d\n' % (len(uniqueSources)))
        print('Total Unique Detections: %d\n' % (len(uniqueSources)))
        total.write('\nTotal Master Residual Sources: %d\n' % (MR_sources))
        print('\nTotal Master Residual Sources: %d\n' % (MR_sources))
        total.write(
            'Total Filtered Master Residual Sources (best representation of real # of sources): %d\n'
            % (MR_sources_filt))
        print(
            'Total Filtered Master Residual Sources (best representation of real # of sources): %d\n'
            % (MR_sources_filt))
        total.write('\nBad Subtractions (Q-Value < %.2f): %d/%d' %
                    (Q_min, bad_subtractions, len(residuals)))
        print('\nBad Subtractions (Q-Value < %.2f): %d/%d\n' %
              (Q_min, bad_subtractions, len(residuals)))
        total.write('\nAverage Number of Sources Per Image: %.2f\n\n\n' %
                    (numFilteredSources / len(residuals)))
        print('\nAverage Number of Sources Per Image: %.2f\n\n\n' %
              (numFilteredSources / len(residuals)))
    print("\n-> Complete!\n")
Example #12
def SUBTRACT(path, method='ois', use_config_file=True):
    '''Performs difference imaging on the science images. The template image is convolved to match the science image's PSF, then the template is subtracted from the science image.
    This process is repeated for a number of different parameter configurations until an optimal residual is found.
    The actual convolution and subtraction is done with either the ``ISIS`` package (Alard) or ``hotpants`` (Becker). See documentation for details.
    
    :param str path: Path of data file tree (contains the **configs**, **data**, **psf**, **residuals**, **sources**, **templates** directories). Use a comma-separated list for mapping to multiple datasets.
    :param str method: Method of difference imaging.
    
        * *ois* (default): Optimal Image Subtraction. Christophe Alard's ``ISIS`` package.
        * *hotpants*: Andrew Becker's ``hotpants`` program. Very similar to Alard's OIS, but differs in input parameters. May be useful to try if OIS is returning inadequate results.
    
    :param bool use_config_file: If ``True`` all input parameters are fetched from the local *OASIS.config* file.
    
    :returns: All science images are differenced and the corresponding residuals are placed in the **residuals** directory with the *_residual_* suffix.
    
    '''
    paths = (path.replace(' ', '')).split(',')
    del path
    for path in paths:
        if use_config_file:
            method = get_config_value('sub_method')
        images = glob.glob(path + '/data/*.fits')
        psf_data = glob.glob(path + '/psf/*')
        if len(psf_data) != 2 * (len(images) + 1):
            psf.PSF(path)
        else:
            print("\n-> PSFs already exist...")
        if method in ('', 'ois'):
            subtract_ais.isis_sub(path)
            optimize.perform_optimization(path)
            MR.MR_swarp(path)
        elif method == 'hotpants':
            subtract_hotpants.hotpants(path)
            optimize.perform_optimization(path)
            MR.MR_swarp(path)
        else:
            print("\n-> Error: Unknown method")
            sys.exit()
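Given the comma-separated path convention in the docstring, a short usage sketch (the target paths below are placeholders, not paths from this codebase):

# explicit method, bypassing OASIS.config
SUBTRACT('/home/user/OASIS/targets/target01', method='hotpants', use_config_file=False)

# map over two datasets; 'sub_method' is then read from OASIS.config for each path
SUBTRACT('/home/user/OASIS/targets/target01, /home/user/OASIS/targets/target02')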
Example #13
def swarp(location, template_perc=0.33, use_config_file=True):
    # strip the trailing '/data' from the path to get the target's root directory
    location = location[:-5]
    temps = glob.glob(location + '/templates/*.fits')
    images = glob.glob(location + '/data/*_A_.fits')
    imNum = len(images)
    numImages = 0
    if use_config_file:
        template_perc = initialize.get_config_value('template_perc')
    if len(temps) == 1:
        temps_name = temps[0].split('/')[-1]
        numImages = int((temps_name.split('.'))[0].split('_')[-1])
    if len(temps) == 0 or numImages != len(images):
        #delete old template
        if len(temps) != 0:
            template_name = temps[0].split('/')[-1]
            os.remove(temps[0])
            try:
                os.remove("%s/psf/%s.cat" % (location, template_name[:-5]))
                os.remove("%s/psf/%s.psf" % (location, template_name[:-5]))
            except OSError:
                pass
        # crop or pad each image to match the smallest image in the set
        print("\n-> Slicing images to a common FOV...")
        shapes = []
        areas = []
        for i in tqdm(images):
            image_data = fits.getdata(i)
            shapes.append(image_data.shape)
            areas.append((image_data.shape)[0] * (image_data.shape)[1])
        min_index = areas.index(min(areas))
        #        correct_shape = max(set(shapes), key=shapes.count)
        correct_shape = shapes[min_index]
        print("\n-> FOV size (y, x): (%d, %d)" %
              (correct_shape[0], correct_shape[1]))
        for index in tqdm(range(len(shapes))):
            s = shapes[index]
            diff = tuple(np.subtract(s, correct_shape))
            im = images[index]
            im_hdu = fits.open(im)
            im_data = im_hdu[0].data
            im_header = im_hdu[0].header
            im_mask = (im_hdu[1].data).astype(int)
            if diff != (0, 0):
                if diff[0] < 0:
                    im_data = np.concatenate(
                        (im_data, np.zeros((diff[0] * -1, s[1]))), axis=0)
                    im_mask = np.concatenate(
                        (im_mask, np.ones((diff[0] * -1, s[1]))), axis=0)
                if diff[0] > 0:
                    im_data = im_data[:-1 * diff[0]]
                    im_mask = im_mask[:-1 * diff[0]]
                if diff[1] < 0:
                    im_data = np.concatenate(
                        (im_data, np.zeros((im_data.shape[0], diff[1] * -1))),
                        axis=1)
                    im_mask = np.concatenate(
                        (im_mask, np.ones((im_mask.shape[0], diff[1] * -1))),
                        axis=1)
                if diff[1] > 0:
                    im_data = im_data[:, :diff[1] * -1]
                    im_mask = im_mask[:, :diff[1] * -1]
                hduData = fits.PrimaryHDU(im_data, header=im_header)
                hduMask = fits.ImageHDU(im_mask.astype(int))
                hduList = fits.HDUList([hduData, hduMask])
                hduList.writeto(im, overwrite=True)
            im_hdu.close()

        # convert all image masks into weight maps
        print("\n-> Converting all image masks into weight maps...")
        for i in tqdm(images):
            weight = sex.weight_map(i)
            hdu = fits.open(i, mode='update')
            data = hdu[0].data
            hdr = hdu[0].header
            try:
                if hdr['WEIGHT'] == 'N':
                    hdr.set('WEIGHT', 'Y')
                    hduData = fits.PrimaryHDU(data, header=hdr)
                    hduWeight = fits.ImageHDU(weight)
                    hduList = fits.HDUList([hduData, hduWeight])
                    hduList.writeto(i, overwrite=True)
            except KeyError:
                hdr.set('WEIGHT', 'Y')
                hduData = fits.PrimaryHDU(data, header=hdr)
                hduWeight = fits.ImageHDU(weight)
                hduList = fits.HDUList([hduData, hduWeight])
                hduList.writeto(i, overwrite=True)
            hdu.close()
        # choose only the top template_perc seeing images
        try:
            FWHMs = []
            for im in images:
                FWHMs.append(psf.fwhm(im))
            template_images = []
            while len(template_images) < round(template_perc * len(images)):
                template_images.append(images[FWHMs.index(np.min(FWHMs))])
                FWHMs.remove(np.min(FWHMs))
            images = template_images
        except FileNotFoundError:
            print(
                "-> Error: PSF models do not exist, run PSF method first then try again."
            )
            sys.exit()
        initialize.create_configs(location)
        config_loc = location + '/configs/default.swarp'
        if os.path.exists(config_loc):
            template = location + "/templates/swarp_median_" + str(imNum) + ".fits"
            with open(config_loc, 'r') as config:
                data = config.readlines()
            data[4] = "IMAGEOUT_NAME" + "        " + template + "\n"
            data[15] = "WEIGHT_IMAGE" + "        " + "@%s/templates/weights.txt" % (location) + "\n"
            data[36] = "IMAGE_SIZE" + "        " + "%s, %s" % correct_shape[::-1] + "\n"
            with open(config_loc, 'w') as config:
                config.writelines(data)
            time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            og_templates = glob.glob(location + "/templates/*.fits")
            log_loc = location + "/templates/log.txt"
            tlist_loc = location + "/templates/template_inputs.txt"
            weight_list = "%s/templates/weights.txt" % (location)
            log_list = open(log_loc, "a+")
            template_list = open(tlist_loc, "w+")
            for i in images:
                template_list.write(str(i) + "[0]" + "\n")
            template_list.close()
            with open(weight_list, 'w+') as w:
                for i in images:
                    w.write("%s[1]\n" % (i))
            if not images:
                print("-> No aligned images to combine\n")
            else:
                try:
                    print("-> Images being combined...\n")
                    os.system("swarp @%s -c %s" % (tlist_loc, config_loc))
                    log_list.write(
                        "template updated at %s UTC | method = median (SWarp) | images = %d\n"
                        % (str(time), len(images)))
                    log_list.close()
                    if len(og_templates) > 0:
                        for o in og_templates:
                            os.system("mv %s %s/OASIS/archive/templates" %
                                      (o, initialize.loc))
                    print(
                        "\n-> Image combination successful!\n-> Template log updated\n"
                    )
                except Exception:
                    print("-> Image combination failed\n")
                    sys.exit()
            temp_hdu = fits.open(template)
            temp_data = temp_hdu[0].data
            temp_hdr = temp_hdu[0].header
            try:
                temp_mask = fits.getdata(
                    os.path.dirname(initialize.__file__) +
                    '/coadd.weight.fits')
            except OSError:
                try:
                    temp_mask = fits.getdata(
                        os.path.dirname(initialize.__file__) +
                        '/AIS_temp/coadd.weight.fits')
                except OSError:
                    print('-> Error: can\'t find coadd.weight.fits\n-> Exiting...')
                    sys.exit()
            mask_median = np.median(temp_mask)
            mask_std = np.std(temp_mask)
            threshold = mask_median - (mask_std)
            temp_mask[temp_mask < threshold] = 0
            temp_mask[temp_mask >= threshold] = 1
            masked_data = np.ma.masked_array(temp_data, mask=temp_mask)
            temp_median = np.ma.median(masked_data)
            temp_hduData = fits.PrimaryHDU(temp_data, header=temp_hdr)
            temp_hduMask = fits.ImageHDU(temp_mask)
            temp_hduList = fits.HDUList([temp_hduData, temp_hduMask])
            temp_hduList.writeto(template, overwrite=True)
            temp_hdu.close()
            temp_hdu = fits.open(template, mode='update')
            (temp_hdu[0].header).set('MEDIAN', str(temp_median))
            temp_hdu.close()
        else:
            print("\n-> No default.swarp file in target's config directory\n")
            sys.exit()
    else:
        print("-> Template already exists")
    try:
        os.remove(os.path.dirname(initialize.__file__) + '/coadd.weight.fits')
    except OSError:
        pass
Example #14
def sextractor_MR(location, MR_method='swarp', use_config_file=True):
    '''
    Runs ``SExtractor`` on the master residual.
    '''
    check_MR = glob.glob("%s/residuals/MR.fits" % (location))
    if check_MR == []:
        print("-> Master residual does not exist, creating it first...")
        if use_config_file:
            MR_method = initialize.get_config_value('MR_method',
                                                    file_loc=location +
                                                    '/configs')
        MR.MR(location, MR_method)
    master_res = glob.glob("%s/residuals/MR.fits" % (location))
    temp = glob.glob("%s/templates/*.fits" % (location))
    if len(master_res) == 1:
        if len(temp) == 1:
            MR_image = master_res[0]
            template = temp[0]
            temp_name = template.split('/')[-1]
            temp_name = temp_name[:-5]
            MR_hdu = fits.open(MR_image)
            MR_header = MR_hdu[0].header
            saturate = MR_header['SATURATE']
            temp_hdr = fits.getheader(template)
            pixscale = temp_hdr['PIXSCALE']
            MR_hdu.close()
            FWHM = psf.fwhm_template(template)
            config_loc = location + '/configs/default.sex'
            with open(config_loc, 'r') as config:
                data = config.readlines()
            data[9] = "PARAMETERS_NAME" + "        " + location + "/configs/default.param" + "\n"
            data[20] = "FILTER_NAME" + "        " + location + "/configs/default.conv" + "\n"
            with open(config_loc, 'w') as config:
                config.writelines(data)
            print("\n-> SExtracting master residual...")
            with open(config_loc, 'r') as config:
                data = config.readlines()
            data[51] = "SATUR_LEVEL" + "        " + str(saturate) + "\n"
            data[62] = "SEEING_FWHM" + "        " + str(FWHM) + "\n"
            data[106] = "PSF_NAME" + "        " + location + "/psf/" + temp_name + ".psf" + "\n"
            data[58] = "PIXEL_SCALE" + "        " + str(pixscale) + "\n"
            data[32] = "WEIGHT_IMAGE" + "        " + "%s[1]" % (MR_image) + "\n"
            with open(config_loc, 'w') as config:
                config.writelines(data)
            os.system("sextractor %s > %s/sources/MR_sources.txt -c %s" %
                      (MR_image, location, config_loc))
            MR_filter_sources(location)
        else:
            print("-> Error: Problem with number of template images\n-> Could not finish SExtracting master residual")
    else:
        print("-> Error: Problem with number of master residuals\n-> Could not finish SExtracting master residual")