Example #1
def calc_noise(ts, n_chunks=1000, chunk_size=50):
    """Performs local-average calculation of the noise of a curve.

    In this case, intended to be used for the BLS spectrum noise.

    Args:
        ts (pd.Series): the series to get the noise from (the code relies
            on pandas .sample() and .iloc).
        n_chunks (int): number of chunks to calculate (accuracy).
        chunk_size (int): size of each chunk, should be less
            than the size of the expected red-noise and real
            variation.

    Returns:
        sigma (float): the noise level.
    """

    # This should carry a warning, possibly cause an exception.
    if chunk_size > len(ts):
        return stats.sigmaclip(ts)[0].std()

    # Split the data into chunks.
    start_nums = ts[:-chunk_size].sample(n_chunks, replace=True).index
    sigma_list = []
    for index in start_nums:
        chunk = ts.iloc[index:index+chunk_size]
        sigma_list.append(stats.sigmaclip(chunk)[0].std())
    #sigma = np.nanpercentile(sigma_list, 30)
    sigma = np.median(sigma_list)

    return sigma
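A minimal driver for calc_noise (a sketch of mine, not from the source): the function relies on pandas .sample() and .iloc, so ts must be a pandas Series, and stats here is scipy.stats.

import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.default_rng(0)
ts = pd.Series(rng.normal(0.0, 0.05, 5000))    # white noise at sigma = 0.05
sigma = calc_noise(ts, n_chunks=200, chunk_size=50)
print(sigma)                                   # should land close to 0.05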
Example #2
def RJmcmc_LRG_check(indir,bins=1):

    '''Looks at LRG output from fit_all.py: loads all .pik files in indir and plots them.'''
    if not indir.endswith('/'):
        indir += '/'
    # load file names
    files = os.listdir(indir)
    print('Plotting from %i bin' % bins)
    age,norm,metal,Z=[],[],[],[]
    for i in files:
        try:
            temp = pik.load(open(indir + i, 'rb'))
            lab.figure()
            mc.plot_model(temp[0][str(bins)][temp[1][str(bins)].min()==
                                         temp[1][str(bins)]][0],     
                          temp[3][i[:-4]],bins)
            lab.title(i)
        #get median,lower, upper bound of parameters
            age.append(nu.percentile(sigmaclip(10**temp[0][str(bins)][:,1])[0],[50,15.9,84.1]))
            metal.append(nu.percentile(sigmaclip(temp[0][str(bins)][:,0])[0],[50,15.9,84.1]))
            norm.append(nu.percentile(sigmaclip(temp[0][str(bins)][:,2])[0],[50,15.9,84.1]))
            Z.append(float(i[:-4]))
        except Exception:
            continue
    age, metal, norm, Z = nu.array(age), nu.array(metal), nu.array(norm), nu.array(Z)
    # make the uncertainties relative
    age[:, 1], age[:, 2] = nu.abs(age[:, 0] - age[:, 1]), nu.abs(age[:, 0] - age[:, 2])
    age = age / 10**9.

    lab.figure()
    lab.errorbar(Z,age[:,0],yerr=age[:,1:].T,fmt='.')
    lab.xlabel('Redshift')
    lab.ylabel('Age (Gyr)')
    
    lab.show()
Example #3
def make_cat(filename,datamax=75000,b_sigma=3.0,b_crlim=3.0):

	if datamax is None: datamax = 75000

	hdul = fits.open(filename)
	banzai_cat = hdul['CAT'].data

	print "Total number of sources in BANZAI catalog: {0}".format(len(banzai_cat))

	ellipticities = [x['ELLIPTICITY'] for x in banzai_cat]
	backgrounds = [x['BACKGROUND'] for x in banzai_cat]
	fwhms = [x['FWHM'] for x in banzai_cat]

	filtered_el, lo, hi = sigmaclip(ellipticities, low=b_sigma, high=b_sigma)
	filtered_bg, lo, hi = sigmaclip(backgrounds, low=b_sigma, high=b_sigma)
	filtered_fwhm, lo, hi = sigmaclip(fwhms, low=b_sigma, high=b_sigma)

	id_num = 0
	sources = []

	for source in banzai_cat:
		if (source['FLAG'] == 0 
				and source['PEAK'] <= datamax
				and source['ELLIPTICITY'] in filtered_el 
				and source['BACKGROUND'] in filtered_bg
				and source['FWHM'] in filtered_fwhm 
				and source['FWHM'] > b_crlim):
			id_num += 1
			
			StN = source['PEAK']/source['BACKGROUND']	

			sources.append([source['RA'],source['DEC'],StN,id_num])

	print ("Number of sources in BANZAI catalog after filtering: "
		"{0}".format(len(sources)))
	print ("({0}-sigma clipping on source ellipticity, "
		"background level, and FWHM.)".format(b_sigma))

	#Sort by S/N	
	sources = sorted(sources, key=itemgetter(2), reverse=True)

	header = "# BEGIN CATALOG HEADER\n"
	header += "# nfields 13\n"
	header += "#     ra     1  0 d degrees %10.5f\n"
	header += "#     dec    2  0 d degrees %10.5f\n"
	header += "#     id     3  0 c INDEF %15s\n"
	header += "# END CATALOG HEADER\n"
	header += "#\n"

	with open('banzai.cat','w') as banzai_cat_file:

		banzai_cat_file.write(header)
		for source in sources:
			line = "{0:10.5f}\t{1:10.5f}\t{2}\n".format(source[0],source[1],source[3])
			banzai_cat_file.write(line)

	print "Saving the {0} best sources to banzai.cat".format(len(sources))

	hdul.close()
	return 'banzai.cat'
Example #4
def make_cat(filename,datamax=75000,b_sigma=3.0,b_crlim=3.0):

	if datamax is None: datamax = 75000

	hdul = fits.open(filename)
	banzai_cat = hdul[1].data

	print "Total number of sources in BANZAI catalog: {0}".format(len(banzai_cat))

	ellipticities = [x['ELLIPTICITY'] for x in banzai_cat]
	backgrounds = [x['BACKGROUND'] for x in banzai_cat]
	fwhms = [x['FWHM'] for x in banzai_cat]

	filtered_el, lo, hi = sigmaclip(ellipticities, low=b_sigma, high=b_sigma)
	filtered_bg, lo, hi = sigmaclip(backgrounds, low=b_sigma, high=b_sigma)
	filtered_fwhm, lo, hi = sigmaclip(fwhms, low=b_sigma, high=b_sigma)

	id_num = 0
	sources = []

	for source in banzai_cat:
		if (source['FLAG'] == 0 
				and source['PEAK'] <= datamax
				and source['ELLIPTICITY'] in filtered_el 
				and source['BACKGROUND'] in filtered_bg
				and source['FWHM'] in filtered_fwhm 
				and source['FWHM'] > b_crlim):
			id_num += 1
			
			StN = source['PEAK']/source['BACKGROUND']	

			sources.append([source['RA'],source['DEC'],StN,id_num])

	print ("Number of sources in BANZAI catalog after filtering: "
		"{0}".format(len(sources)))
	print ("({0}-sigma clipping on source ellipticity, "
		"background level, and FWHM.)".format(b_sigma))

	#Sort by S/N	
	sources = sorted(sources, key=itemgetter(2), reverse=True)

	header = "# BEGIN CATALOG HEADER\n"
	header += "# nfields 13\n"
	header += "#     ra     1  0 d degrees %10.5f\n"
	header += "#     dec    2  0 d degrees %10.5f\n"
	header += "#     id     3  0 c INDEF %15s\n"
	header += "# END CATALOG HEADER\n"
	header += "#\n"

	with open('banzai.cat','w') as banzai_cat_file:

		banzai_cat_file.write(header)
		for source in sources:
			line = "{0:10.5f}\t{1:10.5f}\t{2}\n".format(source[0],source[1],source[3])
			banzai_cat_file.write(line)

	print "Saving the {0} best sources to banzai.cat".format(len(sources))

	hdul.close()
	return 'banzai.cat'
Example #5
def find_ramps(istim, flts, lev_u, lev_d, lb=None, PAM=None, psf=0.1):

    stimdata = flts[istim, :, :] * (tendMJDs[istim] -
                                    tstrMJDs[istim]) * daytosec

    if PAM is not None:
        stimdata *= PAM

    istimgood = (stimdata > lev_d) & (stimdata < lev_u)
    print('Pixels with potentially right stimuli:', np.sum(istimgood))

    if lb is not None:
        if (istim - lb) > 0:
            st = istim - lb
        else:
            st = 0
    else:
        st = 0

    for i in range(st, istim, 1):
        persdata = flts[i, :, :] * (tendMJDs[i] - tstrMJDs[i]) * daytosec
        if PAM is not None:
            persdata *= PAM

        if (imtyps[i] == 'EXT'):
            istimgood = istimgood & (persdata < psf * stimdata)

    print('Pixels with really right stimuli:', np.sum(istimgood))

    icount = np.zeros_like(stimdata, dtype=np.int_)
    iprev = istimgood
    for i in range(istim + 1, len(imtyps), 1):

        persdata = flts[i, :, :] * (tendMJDs[i] - tstrMJDs[i]) * daytosec
        if PAM is not None:
            persdata *= PAM

        if (imtyps[i] == 'EXT'):
            msky = np.nanmean(sigmaclip(persdata, 2., 2.)[0])
            ssky = np.nanstd(sigmaclip(persdata, 2., 2.)[0])
            iskycurr = (persdata < msky + 2 * ssky) & (persdata >
                                                       msky - 2 * ssky)
        elif (imtyps[i] == 'DARK'):
            iskycurr = np.ones_like(persdata, dtype=np.bool_)
        else:
            print('Wrong image type')
            assert False

        igood = istimgood & iskycurr & iprev
        iprev = igood
        icount[igood] += 1

        print('Pixels with ramps extending for at least', i - istim,
              ' exposures:', igood.sum())

        if (np.sum(igood) == 0):
            break

    return icount
Example #6
    def change_head(self, FileN, catalog, image, outname, CCD, **args):

        ccdLen = len(CCD)

        #Getting the data and saving into an array
        o = open(FileN, 'r').read().splitlines()
        info_array = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2','CD1_1','CD1_2','CD2_1','CD2_2',\
                  'PV1_0','PV1_1 ','PV1_2','PV1_4','PV1_5','PV1_6','PV1_7','PV1_8','PV1_9','PV1_10',\
                  'PV2_0','PV2_1 ','PV2_2','PV2_4','PV2_5','PV2_6','PV2_7','PV2_8','PV2_9','PV2_10']

        n = len(info_array)
        matrix = []
        for ii in o:
            for oo in info_array:
                if oo in ii.split('=')[0]:
                    #print ii.split('=')[0], ii.split('=')[1].split('/')[0]
                    matrix.append(ii.split('=')[1].split('/')[0])

        matrix = np.array(matrix)

        #changing the header
        cont = 0
        for i in range(ccdLen):
            ccdstring = "%02d" % int(CCD[i])
            args['ccd'] = ccdstring
            catalog1 = self.template_file.format(
                **args) + '_' + catalog + '.fits'
            image1 = self.template_file.format(**args) + '_' + image + '.fits'

            (fwhm_, ellip, count) = self.fwhm(catalog1, 0)

            h = DESImage.load(image1)
            h.header['FWHM'] = fwhm_
            h.header['ELLIPTIC'] = ellip
            h.header['SCAMPFLG'] = 0

            im = h.data
            iterate1 = stats.sigmaclip(im, 5, 5)[0]
            iterate2 = stats.sigmaclip(iterate1, 5, 5)[0]
            iterate3 = stats.sigmaclip(iterate2, 3, 3)[0]
            skybrite = np.median(iterate3)
            skysigma = np.std(iterate3)

            h.header['SKYBRITE'] = skybrite
            h.header['SKYSIGMA'] = skysigma
            h.header['CAMSYM'] = 'D'
            h.header['SCAMPCHI'] = 0.0
            h.header['SCAMPNUM'] = 0

            for j in range(n):
                h.header[info_array[j]] = float(matrix[cont])
                cont = cont + 1

            # Save once per CCD, after all header keywords are set.
            h.save(
                self.template_file.format(**args) + '_' + outname +
                '.fits')
Example #7
def change_head(File, catalog, image, CCD, **args):

    ccdLen = len(CCD)

    # Getting the data and saving into an array
    o = open(File, 'r').read().splitlines()
    info_array = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2','CD1_1','CD1_2','CD2_1','CD2_2',
                  'PV1_0','PV1_1 ','PV1_2','PV1_4','PV1_5','PV1_6','PV1_7','PV1_8','PV1_9','PV1_10',
                  'PV2_0','PV2_1 ','PV2_2','PV2_4','PV2_5','PV2_6','PV2_7','PV2_8','PV2_9','PV2_10']

    n = len(info_array)
    matrix = []
    for ii in o:
        for oo in info_array:
            if oo in ii.split('=')[0]:
                matrix.append(ii.split('=')[1].split('/')[0])

    matrix = np.array(matrix)

    # changing the header
    cont = 0
    for i in range(ccdLen):
        ccdstring = "%02d" % int(CCD[i])
        args['ccd'] = ccdstring
        catalog1 = template_file.format(**args) + '_' + catalog + '.fits'
        image1 = template_file.format(**args) + '_' + image + '.fits'

        fwhm_, ellip, count = fwhm(catalog1)

        h = DESImage.load(image1)
        h.header['FWHM'] = fwhm_
        h.header['ELLIPTIC'] = ellip
        h.header['SCAMPFLG'] = 0

        h1 = pyfits.open(image1)
        im = h1[0].data
        iterate1 = stats.sigmaclip(im, 5, 5)[0]
        iterate2 = stats.sigmaclip(iterate1, 5, 5)[0]
        iterate3 = stats.sigmaclip(iterate2, 3, 3)[0]
        skybrite = np.median(iterate3)
        skysigma = np.std(iterate3)

        h.header['SKYBRITE'] = skybrite
        h.header['SKYSIGMA'] = skysigma
        h.header['CAMSYM'] = 'D'
        h.header['SCAMPCHI'] = 0.0
        h.header['SCAMPNUM'] = 0

        for j in range(n):
            h.header[info_array[j]] = float(matrix[cont])
            cont = cont + 1

        h.save(template_file.format(**args) + '_wcs.fits')
Example #8
def sclip(a, s=4):
    _, low0, high0 = sigmaclip(a[:,0], low=s, high=s)
    _, low1, high1 = sigmaclip(a[:,1], low=s, high=s)
    _, low2, high2 = sigmaclip(a[:,2], low=s, high=s)
    a0bool = np.logical_and(a[:,0] > low0, a[:,0] < high0)
    a1bool = np.logical_and(a[:,1] > low1, a[:,1] < high1)
    a2bool = np.logical_and(a[:,2] > low2, a[:,2] < high2)
    k0 = np.where(a0bool)[0]
    k1 = np.where(a1bool)[0]
    k2 = np.where(a2bool)[0]
    return k0, k1, k2
Example #9
def show_features(df, features, labels, frac=1.0):
    """ This plot will display a fraction (frac) of all objects in the
    DataFrame (df) in 2D-feature spaces. Not all feature combinations are
    displayed but only the ones following after another in the list (features).
    The objects will be displayed in colors corresponding to their labels in
    the DataFrame. We use level 5.0 sigma clipping to center the axes on the
    majority of the objects.

    Input:
            df (DataFrame) DataFrame containing features and labels
            features (list) list of features in the DataFrame
            labels (list) list of the names of the labels in df.label
            frac (float) fraction of objects in df to be displayed

    Output:
            matplotlib figure object
    """

    df = df.sample(frac=frac)

    cols = 3
    # enough rows for the len(features) - 1 adjacent-feature panels
    rows = int(np.ceil((len(features) - 1) / cols))
    gs = gridspec.GridSpec(rows, cols)

    fig = plt.figure(figsize=(9, 3 * rows), dpi=100)
    ax = []

    for i in range(len(features) - 1):
        row = (i // cols)
        col = i % cols
        ax.append(fig.add_subplot(gs[row, col]))

        color = iter(cm.rainbow(np.linspace(0, 1, len(labels))))

        #sigma clipping here
        x_range, xlow, xup = sigmaclip(df[features[i]], low=5.0, high=5.0)

        y_range, ylow, yup = sigmaclip(df[features[i + 1]], low=5.0, high=5.0)

        for j in range(len(labels)):
            dfj = df.query('label =="' + str(labels[j]) + '"')
            ax[-1].scatter(dfj[features[i]], dfj[features[i+1]], \
            alpha=0.2, c=next(color),edgecolor='None' )

        ax[-1].set_xlim(xlow, xup)
        ax[-1].set_ylim(ylow, yup)
        ax[-1].set_xlabel(str(features[i]))
        ax[-1].set_ylabel(str(features[i + 1]))

    plt.tight_layout()

    plt.show()
Example #10
def FWHM_stats(data,all=True,clip=False):
    """
    Description:
    ------------
    Return basic FWHM stats
    """
    
    if all:
        fwhm =  FWHM_all(data)
    elif clip:
        fwhm = FWHM_ave(data,clip=clip)
    else:
        fwhm = FWHM_ave(data)


    # Basic stats
    med = np.median(fwhm)
    mean = np.mean(fwhm)
    fwhm_clip, low, high = sigmaclip(fwhm,low=3,high=3)
    meanclip = np.mean(fwhm_clip)

    # Get mode using kernel density estimation (KDE)
    vals = np.linspace(0,30,1000)
    fkde = gaussian_kde(fwhm)
    fpdf = fkde(vals)
    mode = vals[np.argmax(fpdf)]

    std = np.std(fwhm)
    
    return [mean,med,mode,std,meanclip]
Example #11
def measure_focal_length(tes_xy, peak_azel, npeaks=9, ntes=256, nsig=3):
    tes_dist = cdist(tes_xy, tes_xy, 'euclidean')

    fl_mean = np.zeros((npeaks, ntes))
    fl_std = np.zeros((npeaks, ntes))
    for peak in range(npeaks):
        print('Peak ', peak, '\n')
        alpha = np.deg2rad(
            cdist(peak_azel[:, :, peak], peak_azel[:, :, peak], 'euclidean'))
        tanalpha = np.tan(alpha)

        focal_length = tes_dist / tanalpha
        print(focal_length.shape)
        for tes in range(ntes):
            print(tes)
            fl = focal_length[tes]
            fl = fl[~np.isnan(fl)]
            fl_clip, mini, maxi = sigmaclip(fl, low=nsig, high=nsig)
            print(mini, maxi)
            print(fl_clip.shape)

            # Mean and STD for each TES
            fl_mean[peak, tes] = np.mean(fl_clip)
            fl_std[peak, tes] = np.std(fl_clip) / np.sqrt(len(fl_clip))

    return fl_mean, fl_std
Example #12
def define_warm_threshold(image, threshold=5):
    """Pixels with signal rates above the threshold calculated here will
    be flagged as hot

    Parameters
    ----------
    image : numpy.ndarray
        2d image

    threshold : int
        Number of sigma above the mean to use for the threshold that
        defines warm/hot pixels. Pixels with signal below this level
        are considered nominal and will have signal values of 0 used
        in the dark. Pixels above this threshold will have dark signals
        based on their mean dark rate.

    Returns
    -------
    threshold : float
        Threshold value
    """
    clipped, lo, high = sigmaclip(image, low=3., high=3.)
    mean_val = np.mean(clipped)
    dev = np.std(clipped)
    threshold = mean_val + threshold * dev
    return threshold
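A quick synthetic check of define_warm_threshold (hedged sketch; numpy and scipy.stats.sigmaclip are assumed imported, just as the snippet assumes):

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(1)
image = rng.normal(0.01, 0.002, (256, 256))    # nominal dark rates
image[10, 10] = image[42, 7] = 1.0             # two hot pixels
thresh = define_warm_threshold(image, threshold=5)
print((image > thresh).sum())                  # expect 2: just the hot pixels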
Example #13
def FWHM_ave(data,clip=False,sigmas=False):
    """
    Description:
    ------------    
    Computes the average FWHM
    Expects data dictionary in format of "get_data"

    """

    raw = data['FWHMraw']
    sz = len(raw)
    FWHM = np.ones(sz)* np.nan
    sigma = np.ones(sz)* np.nan
    for i in range(sz):
        vals = np.array(raw[i]).astype('float')
        nvals = len(vals)
        minval = np.min(vals)
        maxval = np.max(vals)
        if clip:
            newvals, low, high = sigmaclip(vals,low=clip,high=clip)
            FWHM[i] = np.mean(newvals)
            sigma[i] = np.std(newvals)
        else:
            FWHM[i] = np.mean(vals)
            sigma[i] = np.std(vals)

    if sigmas:
        return [FWHM,sigma]
    else:
        return FWHM
Example #14
def FluxInAmp(amp, data, plot=False, **kwargs):
    region = inst.OutRegion(amp)   # avoid shadowing the built-in `list`
    frame = data[region[0]:region[1], region[2]:region[3]]
    pix = frame.flatten()
    fact = 5
    pix2, low, upp = stats.sigmaclip(pix, fact, fact)
    print('len pix = ', len(pix), ', len pix2 = ', len(pix2))
    if (plot == True):
        pl.hist(pix,
                histtype='bar',
                log=True,
                range=(-4000, 4000),
                bins=100,
                color='green',
                label='trial')
        pl.hist(pix2,
                histtype='bar',
                log=True,
                range=(-4000, 4000),
                bins=100,
                color='crimson',
                label='trial')
        pl.show()

    mean = np.median(pix2)
    smean = np.std(pix2)

    # Flag clipped pixels
    np.clip(frame, low, upp, out=data[region[0]:region[1], region[2]:region[3]])
    print('minmax outimage : ',
          np.amax(data[region[0]:region[1], region[2]:region[3]]),
          np.amin(data[region[0]:region[1], region[2]:region[3]]))

    return mean, smean, low, upp
Example #15
    def _clip_distances(self, distances_rad):
        """Compute a clipped max distance and calculate the number of pairs
        that pass the clipped dist.

        Parameters
        ----------
        distances_rad : `numpy.ndarray`, (N,)
            Distances between pairs.

        Returns
        -------
        output_struct : `lsst.pipe.base.Struct`
            Result struct with components:

            - ``n_matched_clipped`` : Number of pairs that survive the
              clipping on distance. (`float`)
            - ``clipped_max_dist`` : Maximum distance after clipping.
              (`float`).
        """
        clipped_dists, _, clipped_max_dist = sigmaclip(distances_rad,
                                                       low=100,
                                                       high=2)
        # Check clipped distances. The minimum value here
        # prevents over convergence on perfect test data.
        if clipped_max_dist < 1e-16:
            clipped_max_dist = 1e-16
            n_matched_clipped = np.sum(distances_rad < clipped_max_dist)
        else:
            n_matched_clipped = len(clipped_dists)

        return pipeBase.Struct(n_matched_clipped=n_matched_clipped,
                               clipped_max_dist=clipped_max_dist)
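One detail worth calling out in the call above (my reading, not stated in the source): low=100 places the lower bound far below any physical distance, so the clipping is effectively one-sided and only trims the long tail of large separations. A standalone sketch:

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(2)
dists = np.abs(rng.normal(0.0, 1e-6, 1000))    # well-matched pairs
dists[:5] = 1e-3                               # a few gross mismatches
clipped, lo, hi = sigmaclip(dists, low=100, high=2)
print(len(clipped), lo, hi)                    # lo sits far below zero; hi trims the tail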
Example #16
    def granulation_model(self):
        peak = self.period_prior()
        x = self.lc.lcf.time
        y = self.lc.lcf.flux
        yerr = self.lc.lcf.flux_err
        with pm.Model() as model:
            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2", mu=2*np.log(np.min(sigmaclip(yerr)[0])), sd=1.0)

            logw0 = pm.Bound(pm.Normal, lower=-0.5, upper=np.log(2 * np.pi / self.min_period))("logw0", mu=0.0, sd=5)
            logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=5)
            kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2))

            #GP model
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2))

            # Compute the Gaussian Process likelihood and add it into the
            # PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)
        return model, map_soln
Example #17
def makeBinPlot(x,y,diff_array,filt,outname,sigTol=3.5):

    cut_arr = stats.sigmaclip(diff_array,sigTol,sigTol)

    min_diff = np.min(cut_arr[0])
    max_diff = np.max(cut_arr[0])

    use = np.logical_and(diff_array>=min_diff,diff_array<=max_diff)

    mean, x_edges, y_edges, _ = stats.binned_statistic_2d(x[use],y[use],diff_array[use],statistic='mean',bins=20,range=[[0,4096],[0,4096]])

    cm = plt.cm.get_cmap('viridis')

    bin_cent_y = (y_edges[1:] + y_edges[:-1])*0.5
    bin_cent_x = (x_edges[1:] + x_edges[:-1])*0.5
    #

    extent=(np.min(bin_cent_x),np.max(bin_cent_x),np.min(bin_cent_y),np.max(bin_cent_y))

    fig, ax = plt.subplots(figsize=(6,6))

    cax = ax.imshow(mean.T,extent=extent,origin='lower',interpolation='nearest',vmin=min_diff,vmax=max_diff,cmap=cm)
    cbar = fig.colorbar(cax)

    title_str =  outname + filt
    ax.set_title(title_str)

    plt.savefig('./plots2107/' +outname+filt+'_bin.png',dpi=600,bbox_inches='tight')

    return None
Example #18
def create_bpm_darks(frame_dark):
    '''
    Create a bad pixel map from DARK(,BACKGROUND)-files based on bias offsets and
    sigma filtering.
    Input:
        frame_dark: (mean-combined) DARK(,BACKGROUND)-frame
    Output:
        frame_bpm_dark: bad pixel map created from darks
    Function written by Rob van Holstein; constructed from functions by Christian Ginski
    Function status: verified
    '''

    # Create initial bad pixel map with only 1's
    frame_bpm_dark = np.ones(frame_dark.shape)

    # Remove outliers from dark frame and compute median and standard deviation
    frame_dark_cleaned = stats.sigmaclip(frame_dark, 5, 5)[0]
    stddev = np.nanstd(frame_dark_cleaned)
    median = np.nanmedian(frame_dark_cleaned)

    # Subtract median from dark frame and take absolute value
    frame_dark = np.abs(frame_dark - median)

    # Initialize a bad pixel array with 1 as default pixel value
    frame_bpm = np.ones(frame_dark.shape)

    # Set pixels that deviate by more than 3.5 sigma from the frame median value
    # to 0 to flag them as bad pixels
    frame_bpm[frame_dark > 3.5 * stddev] = 0

    # Add bad pixels found to master bad pixel map
    frame_bpm_dark *= frame_bpm

    return frame_bpm_dark
Example #19
def ellipse_continue(img, x_extent, y_extent, \
        intens, intens_err, x_center, y_center, a, b, theta, rad):
    x_cen, y_cen, a_initial, b_initial, angle, a_diff, b_diff = \
        end_values(x_extent, y_extent, x_center, y_center, a, b, theta)
    a_img = dist_ellipse_idl(x_extent, y_extent, \
        x_cen, y_cen, a_initial, b_initial, angle)
    a_remaining = closest_side(x_extent, y_extent, a_img) - a_initial
    num_measurements = int(a_remaining / a_diff) - 1
    if num_measurements > 0:
        for n in range(num_measurements):
            a_current = a_initial + n * a_diff
            logic_img = numpy.logical_and(a_img < a_current + rad, \
                a_img > a_current - rad)
            y_ind, x_ind = numpy.where(logic_img)
            annulus = img[y_ind, x_ind]
            # annulus == annulus is False for NaNs, so this keeps finite values
            nan_ind = numpy.where(annulus == annulus)
            annulus = annulus[nan_ind]
            clipped_annulus, lower, upper = sigmaclip(annulus)
            if n == 0:
                intens_diff = numpy.mean(clipped_annulus) - intens[-1]
            else:
                intens = numpy.append(
                    intens,
                    numpy.mean(clipped_annulus) - intens_diff)
                intens_err = numpy.append(intens_err,
                                          numpy.std(clipped_annulus))
                x_center = numpy.append(x_center, x_cen)
                y_center = numpy.append(y_center, y_cen)
                a = numpy.append(a, a_current)
                b = numpy.append(b, a_current * b_initial / a_initial)
                theta = numpy.append(theta, angle * 180 / numpy.pi)
    return intens, intens_err, x_center, y_center, a, b, theta
Example #20
def graph_FWHM_data_range(start_date=datetime.datetime(2015,3,6),
                          end_date=datetime.datetime(2015,4,15),tenmin=True,
                          path='/home/douglas/Dropbox (Thacher)/Observatory/Seeing/Data/',
                          write=True,outpath='./'):
    
    
    plot_params()
    fwhm = get_FWHM_data_range(start_date = start_date, end_date=end_date, path=path, tenmin=tenmin)

    # Basic stats
    med = np.median(fwhm)
    mean = np.mean(fwhm)
    fwhm_clip, low, high = sigmaclip(fwhm,low=3,high=3)
    meanclip = np.mean(fwhm_clip)

    # Get mode using kernel density estimation (KDE)
    vals = np.linspace(0,30,1000)
    fkde = gaussian_kde(fwhm)
    fpdf = fkde(vals)
    mode = vals[np.argmax(fpdf)]
    std = np.std(fwhm)


    plt.ion()
    plt.figure(99)
    plt.clf()
    plt.hist(fwhm, color='darkgoldenrod',bins=35)
    plt.xlabel('FWHM (arcsec)',fontsize=16)
    plt.ylabel('Frequency',fontsize=16)
    plt.annotate('mode $=$ %.2f" ' % mode, [0.87,0.85],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')
    plt.annotate('median $=$ %.2f" ' % med, [0.87,0.8],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')
    plt.annotate('mean $=$ %.2f" ' % mean, [0.87,0.75],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')

    xvals = np.linspace(0,30,1000)
    kde = gaussian_kde(fwhm)
    pdf = kde(xvals)
    dist_c = np.cumsum(pdf)/np.sum(pdf)
    func = interp1d(dist_c,vals,kind='linear')
    lo = float(func(math.erfc(1./np.sqrt(2))))
    hi = float(func(math.erf(1./np.sqrt(2))))

    disthi = np.linspace(.684,.999,100)
    distlo = disthi-0.6827
    disthis = func(disthi)
    distlos = func(distlo)

    interval = np.min(disthis-distlos)

    plt.annotate(r'1 $\sigma$ int. $=$ %.2f" ' % interval, [0.87,0.70],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')
    
    
    plt.rcdefaults()

    plt.savefig(outpath+'Seeing_Cumulative.png',dpi=300)

    return
Example #21
        def __init__(self, weights, branch_num):
            if branch_num > 1:
                global clipped_arr
            global index_count
            super(fit, self).__init__()
            self.weights = weights
            self.branch_num = branch_num
            self.index = index_count
            #self.parent_index 	= parent_index

            # Auto-calculate chi-squared
            index_weights = np.nonzero(self.weights)  # saves time!
            #chi_arr 			= ((np.dot(self.weights,models))	- data) / error
            chi_arr = np.dot(self.weights[index_weights],
                             chi_models[index_weights])

            if branch_num == 0:
                chi_clipped_arr = sigmaclip(chi_arr, low=3.0, high=3.0)
                chi_clip_sq = np.square(chi_clipped_arr[0])
                clipped_arr = (chi_arr > chi_clipped_arr[1]) & (
                    chi_arr < chi_clipped_arr[2])
                self.clipped_arr = clipped_arr
            else:
                chi_clip_sq = np.square(chi_arr[clipped_arr])

            chi_squared = np.sum(chi_clip_sq)
            #print chi_squared
            self.chi_squared = chi_squared

            index_count += 1
Example #22
def getlocalbackground(x, y, xgrid, ygrid, skyrad_o, skyrad_i, fltdata):

    dst = np.sqrt((xgrid - x)**2 + (ygrid - y)**2)
    msk = (dst < skyrad_o) & (dst > skyrad_i)
    skyarr = fltdata[msk]
    cskyarr, l, u = sigmaclip(skyarr, 2., 2.)
    return np.nanmean(cskyarr)
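A small usage sketch for the annulus estimator above (assumptions mine: numpy as np and sigmaclip from scipy.stats, as the function itself expects):

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(3)
fltdata = rng.normal(10.0, 1.0, (128, 128))    # flat sky at ~10 counts
ygrid, xgrid = np.mgrid[0:128, 0:128]
sky = getlocalbackground(64, 64, xgrid, ygrid, 20.0, 10.0, fltdata)
print(sky)                                     # clipped annulus mean, ~10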
Example #23
    def __init__(self, lc, window_width):
        self.lc = lc
        self.window_width = window_width
        self._trace = None
        self.min_period = np.max(sigmaclip(np.diff(self.lc.lcf.time))[0])
        self.max_period = 0.5 * (self.lc.lcf.time.max() - self.lc.lcf.time.min())
        self.lctype = lc.lctype
Example #24
def plotImageStats(image,prefix,inter,stitle=""):

    """

    plot histogram of an image
    
    """

    
    # scipy.stats.sigmaclip takes positional low/high factors (it has no
    # sigma=/iters= keywords) and iterates to convergence on its own
    back, _, _ = sigmaclip(image.flatten(), 2, 2)
    backImage = back.mean()
    rmsImage = back.std()

    logbins = np.geomspace(image.min(), image.max(), 50)
    
    fig,ax = plt.subplots()
    ax.hist(image.flatten(),bins=logbins,histtype="step")
    plt.title("Histogram of Region of Interest"+stitle)
    plt.xlabel("Flux Value")
    plt.ylabel("N")
    plt.yscale("log")
    plt.savefig(prefix+"_stats.png")

    if(inter == 1):
        fig.show()

    return backImage,rmsImage
Example #25
def master_sky_plot():
    bias_data = cam.get_master_bias(-40)
    squished = cam.median_stack(i, folder)
    reduced = squished - bias_data  #Subtract master bias
    #Clip to 3 sigmas the squished image to remove recurring
    #dead/hot pixels
    reduce_clipped, _, _ = sigmaclip(reduced, 3, 3)
    reduce_clipped_s = reduce_clipped / 2
    fig, (ax1, ax2, ax3) = plt.subplots(figsize=(13, 3), ncols=3)

    sky = ax1.imshow(squished, vmin=1900, vmax=2600)
    ax1.set_title('Median-stacked(DIT=2s,NDIT=8)')
    fig.colorbar(sky, ax=ax1)

    bias_img = ax2.imshow(bias_data, vmin=1400, vmax=2000)
    ax2.set_title('Master Bias(-40C)')
    fig.colorbar(bias_img, ax=ax2)

    sky_reduced = ax3.imshow(reduced, vmin=400, vmax=700)
    ax3.set_title('Bias-subtracted')
    fig.colorbar(sky_reduced, ax=ax3)

    plt.suptitle(
        'Sky background (no filter) at airmass 2: {}ADUs/pixel/s'.format(
            round(np.mean(reduce_clipped_s), 2)))
    plt.show()
Example #26
def chi2_from_two_spec(spec1,spec2, wvrange=None):
    """Return the Chi2 sum of the flux differences between spec1 and spec2
    These must be in the same wavelength grid
    """
    from scipy.stats import sigmaclip

    assert spec1.npix == spec2.npix, "Specs must be of same length"
    assert np.alltrue(spec1.wavelength == spec2.wavelength), "Specs must be in the same wavelength grid"

    # obtain error2
    if (spec1.sig_is_set) and (spec2.sig_is_set):
        er2 = spec1.sig**2 + spec2.sig**2
    elif (spec1.sig_is_set):
        er2 = spec1.sig**2
    elif (spec2.sig_is_set):
        er2 = spec2.sig**2
    else:
        er2 = 1.
    # clean er2 a bit: replace values at or below the clipped floor by the mean
    _, er2_min, er2_max = sigmaclip(er2, low=3, high=3)
    er2 = np.where(er2 <= er2_min, np.mean(er2), er2)

    chi2 = (spec1.flux - spec2.flux)**2. / er2
    cond = (spec1.wavelength.to('AA').value >= wvrange[0]) & (spec1.wavelength.to('AA').value <= wvrange[1])
    chi2_dof = np.sum(chi2[cond])/len(chi2[cond])
    return chi2_dof
Example #29
def _median_filtering_one_job(task):
    '''
    Median filter worker function for parallelization,
    works on determining the median filter

    task - the task being passed to this worker, see
           median_filtering function for details
    '''

    # Extract parameters
    (i, periodogramvals, freq_window_index_size, median_filter_size) = task

    window_vals = []  # For storing values to use in median filtering

    # Get the values to be used for the filter
    if i >= freq_window_index_size:
        if i - freq_window_index_size < 0:
            raise RuntimeError("Too small, " + str(i - freq_window_index_size))
        window_vals.extend(periodogramvals[
            max(0, i - freq_window_index_size - median_filter_size +
                1):i - freq_window_index_size + 1].tolist())
    if i + freq_window_index_size < len(periodogramvals):
        window_vals.extend(periodogramvals[i + freq_window_index_size:i +
                                           freq_window_index_size +
                                           median_filter_size].tolist())
    window_vals = np.array(window_vals)

    # Keep only finite ones
    wherefinite = np.isfinite(window_vals)

    # Sigma clipping
    vals, low, upp = sigmaclip(window_vals[wherefinite], low=3, high=3)

    # Return the median value
    return np.median(vals)
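The worker can be exercised directly with a single task tuple; a hedged example whose tuple layout follows the unpacking at the top of the function:

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(4)
periodogramvals = rng.normal(1.0, 0.1, 1000)
task = (500, periodogramvals, 20, 50)    # (i, periodogram, window gap, filter size)
print(_median_filtering_one_job(task))   # clipped local median, close to 1.0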
Example #30
def findpeaks(x, y, wid, sth, ath, pkg=None, verbose=False):
    """Find peaks in spectrum"""
    # derivative
    grad = np.gradient(y)
    # smooth derivative
    win = boxcar(wid)
    d = sp.signal.convolve(grad, win, mode='same') / sum(win)
    # size
    nx = len(x)
    # set up windowing
    if not pkg:
        pkg = wid
    hgrp = int(pkg/2)
    hgt = []
    pks = []
    sgs = []
    # loop over spectrum
    # limits to avoid edges given pkg
    for i in np.arange(pkg, (nx - pkg)):
        # find zero crossings
        if np.sign(d[i]) > np.sign(d[i+1]):
            # pass slope threshold?
            if (d[i] - d[i+1]) > sth * y[i]:
                # pass amplitude threshold?
                if y[i] > ath or y[i+1] > ath:
                    # get subvectors around peak in window
                    xx = x[(i-hgrp):(i+hgrp+1)]
                    yy = y[(i-hgrp):(i+hgrp+1)]
                    if len(yy) > 3:
                        try:
                            # gaussian fit
                            res, _ = curve_fit(gaus, xx, yy,
                                               p0=[y[i], x[i], 1.])
                            # check offset of fit from initial peak
                            r = abs(x - res[1])
                            t = r.argmin()
                            if abs(i - t) > pkg:
                                if verbose:
                                    print(i, t, x[i], res[1], x[t])
                            else:
                                hgt.append(res[0])
                                pks.append(res[1])
                                sgs.append(abs(res[2]))
                        except RuntimeError:
                            continue
    # clean by sigmas
    cvals = []
    cpks = []
    sgmn = None
    if len(pks) > 0:
        cln_sgs, low, upp = sigmaclip(sgs, low=3., high=3.)
        for i in range(len(pks)):
            if low < sgs[i] < upp:
                cpks.append(pks[i])
                cvals.append(hgt[i])
        sgmn = cln_sgs.mean()
        # sgmd = float(np.nanmedian(cln_sgs))
    else:
        print("No peaks found!")
    return cpks, sgmn, cvals
Example #31
def remove_extrema_frames(input_frames: np.ndarray,
                          n_sigma: float = 3) -> np.ndarray:
    """Remove frames with extremum mean values from the frames used in
    reference image processing/creation.

    Likely these are empty frames of pure noise or very high intensity frames
    relative to mean.

    Parameters
    ----------
    input_frames : numpy.ndarray, (N, M, K)
        Set of frames to trim.
    n_sigma : float, optional
        Number of standard deviations to above which to clip. Default is 3
        which was found to remove all empty frames while preserving most
        frames.

    Returns
    -------
    trimmed_frames : numpy.ndarray, (N, M, K)
        Set of frames with the extremum frames removed.
    """
    frame_means = np.mean(input_frames, axis=(1, 2))
    _, low_cut, high_cut = sigmaclip(frame_means, low=n_sigma, high=n_sigma)
    trimmed_frames = input_frames[np.logical_and(frame_means > low_cut,
                                                 frame_means < high_cut)]
    return trimmed_frames
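A quick synthetic check of the trimming above (sketch under my assumptions; numpy and scipy.stats.sigmaclip in scope):

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(5)
frames = rng.normal(100.0, 1.0, (40, 16, 16))
frames[3] = 0.0       # an empty frame
frames[7] = 1.0e4     # a saturated frame
print(remove_extrema_frames(frames, n_sigma=3).shape)   # expect (38, 16, 16)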
Example #32
def get_global_FL(tes_xy, peak_azel, npeaks=9, nsig=3):
    tes_dist = cdist(tes_xy, tes_xy, 'euclidean')
    tes_dist = np.triu(tes_dist)
    print(np.max(tes_dist))
    plt.figure()
    plt.imshow(tes_dist)

    allfl_clip, alltes_dist_cut, alltanalpha_cut = [], [], []
    for peak in range(npeaks):
        # Get all angular distances between peak position
        azel = peak_azel[:, :, peak]
        alpha = np.deg2rad(cdist(azel, azel, 'euclidean'))
        alpha = np.triu(alpha)
        tanalpha = np.tan(alpha)

        focal_length = tes_dist / tanalpha
        fl = focal_length[~np.isnan(focal_length)]
        fl_clip, mini, maxi = sigmaclip(fl, low=nsig, high=nsig)
        print(mini, maxi)
        print(fl_clip.shape)

        tes_dist_cut = tes_dist[(focal_length > mini) & (focal_length < maxi)]
        tanalpha_cut = tanalpha[(focal_length > mini) & (focal_length < maxi)]
        print(tes_dist_cut.shape, tanalpha_cut.shape)

        allfl_clip.append(fl_clip)
        alltes_dist_cut.append(tes_dist_cut)
        alltanalpha_cut.append(tanalpha_cut)

    fl_mean = [np.mean(fl) for fl in allfl_clip]
    fl_std = [np.std(fl) / np.sqrt(len(fl)) for fl in allfl_clip]

    return allfl_clip, fl_mean, fl_std
Example #33
def read_noise_estimate(n):
    '''
    Capture n pairs of bias frames (520REFCLKS)
    Produce difference image from each pair and store
    read noise estimate from sigma/sqrt(2) of difference
    Output histogram of final pair with RN estimate as average
    of all pairs
    '''
    cam.set_int_time(0.033)
    cam.set_frame_time(100)
    cam.printProgressBar(0,
                         2 * n,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
    y = 0
    RNs = []

    for j in range(n):
        bias_1, _ = cam.simple_cap()
        y += 1
        cam.printProgressBar(y, 2 * n)
        bias_2, _ = cam.simple_cap()
        y += 1
        cam.printProgressBar(y, 2 * n)

        bias_1 = np.asarray(bias_1, dtype=np.int32)
        bias_2 = np.asarray(bias_2, dtype=np.int32)
        #save max mean dif, max absolute dif
        bias_dif = bias_2 - bias_1

        dif_clipped = bias_dif.flatten()
        RNs.append(np.std(dif_clipped) / np.sqrt(2))
    dev = np.std(dif_clipped)
    RNs = np.array(RNs)
    RN = round(np.median(RNs), 3)
    uncert = round(3 * np.std(RNs), 2)
    sample_hist, _, _ = stats.sigmaclip(dif_clipped, 5, 5)
    N, bins, _ = plt.hist(sample_hist,bins = 265,facecolor='blue', alpha=0.75,\
                label = 'Bias Difference Image')

    def fit_function(x, B, sigma):
        return (B * np.exp(-1.0 * (x**2) / (2 * sigma**2)))
    popt, _ = optimize.curve_fit(fit_function, xdata=bins[0:-1]+0.5, \
                ydata=N, p0=[0, dev])
    xspace = np.linspace(bins[0], bins[-1], 100000)
    fit_dev = round(popt[1], 3)
    delta_sig = round(abs(fit_dev - dev), 2)
    plt.plot(xspace+0.5, fit_function(xspace, *popt), color='darkorange', \
        linewidth=2.5, label=r'Gaussian fit, $\Delta\sigma$:{}'.format(delta_sig))

    plt.ylabel('No. of Pixels')
    plt.xlabel('ADUs')
    plt.title(
        r'Read Noise Estimate:${}\pm{}$ ADUs ($n={}$, FPA:$-40^\circ$C)'.format(
            RN, uncert, n))
    plt.legend(loc='best')
    plt.show()
    print('PROGRAM HAS COMPLETED')
Example #34
def fig_lombscargle(lc, min_period=None, max_period=None, Pgp=None, Pmcq=None):
    """
    display the lombscargle plots towards the light curve.
    """
    x = lc.lcf.time
    y = lc.lcf.flux
    yerr = lc.lcf.flux_err
    if min_period is None and max_period is None:
        min_period = np.max(sigmaclip(np.diff(x))[0])
        max_period = 0.5 * (x.max() - x.min())

    results = xo.estimators.lomb_scargle_estimator(x,
                                                   y,
                                                   yerr,
                                                   max_peaks=1,
                                                   min_period=min_period,
                                                   max_period=max_period,
                                                   samples_per_peak=100)

    if len(results["peaks"]) == 0:
        results = xo.estimators.lomb_scargle_estimator(x,
                                                       y,
                                                       max_peaks=1,
                                                       min_period=min_period,
                                                       max_period=max_period,
                                                       samples_per_peak=100)

    peak = results["peaks"][0]
    freq, power = results["periodogram"]
    fig = plt.figure(figsize=(6.9, 5.5))
    plt.plot(-np.log10(freq), power, "k")
    plt.axvline(np.log10(peak["period"]),
                color="k",
                lw=2,
                alpha=0.5,
                label="$P_{{LS}}:{period:.2f}d$".format(LS='LS',
                                                        period=peak['period']))
    if Pgp is not None:
        plt.axvline(np.log10(Pgp),
                    color="C1",
                    lw=2,
                    alpha=0.5,
                    label="$P_{{GP}}:{period:.2f}d$".format(GP='GP',
                                                            period=Pgp))
    if Pmcq is not None:
        plt.axvline(np.log10(Pmcq),
                    color="C2",
                    lw=2,
                    alpha=0.5,
                    label="$P_{{Mcq}}:{period:.2f}d$".format(Mcq='Mcq',
                                                             period=Pmcq))
    plt.xlim((-np.log10(freq)).min(), (-np.log10(freq)).max())
    plt.yticks([])
    plt.xticks(fontsize=20)
    plt.xlabel("log10(period)", fontsize=15)
    plt.ylabel("power", fontsize=15)
    plt.legend()

    return peak['period'], fig
Example #35
def include_1overf(data, sigma, left, right, bottom, top, pixeldq,
                   smoothing_length, side_gain):
    '''Function to remove 1/f noise using side reference pixels.'''

    # check number of amps used
    if left > 0:
        if right > 0:
            amps = 4
        else:
            amps = 1
    else:
        amps = 1

    # get data shape and amp boundaries
    nint, ngroup, ys, xs = data.shape

    if amps == 4:
        bounds = [0, left + 508, left + 1020, left + 1532, xs]
    else:
        bounds = [0, xs]

    vbounds = [0, ys]

    # get left and right medians, then get the average of those
    for integ in range(nint):
        hold = []

        for group in range(ngroup):
            lmeds = None
            rmeds = None
            if left > 0:
                lpix = data[integ, group, :, 0:left]
                lmeds = oneoverf_medians(lpix, 0, left, pixeldq,
                                         smoothing_length)
            if right > 0:
                rpix = data[integ, group, :, xs - right:xs]
                rmeds = oneoverf_medians(rpix, xs - right, xs, pixeldq,
                                         smoothing_length)
            if (lmeds is not None) and (rmeds is not None):
                meds = np.mean([lmeds, rmeds], axis=0)
            elif lmeds is not None:
                meds = lmeds
            else:
                meds = rmeds

            # remove the 1/f noise
            for row in range(ys):
                data[integ, group, row, :] -= side_gain * meds[row]

                # put values into a table to compare groups and amps
                cdat, datlow, dathigh = sigmaclip(data[integ, group, row, :],
                                                  low=sigma,
                                                  high=sigma)
                meandat = np.mean(cdat)
                hold.append([group, row, meandat, meds[row]])

    return data.astype(float), hold
Example #36
def findFirstEdge(points, candidates, default):
  result = default
  clipped, low, high = stats.sigmaclip(points, low=3.5, high=3.5)
  for candidate in candidates:
    if points[candidate] < low or points[candidate] > high:
      result = candidate
      break
  return result
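An illustrative call (mine, not from the source): the first candidate index whose value escapes the 3.5-sigma clipped range is returned.

import numpy as np
from scipy import stats

rng = np.random.default_rng(6)
points = rng.normal(0.0, 1.0, 500)
points[123] = 50.0    # a clear edge/outlier
print(findFirstEdge(points, candidates=range(len(points)), default=-1))   # expect 123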
Example #37
def main(**kwargs):
    # 1 do daophot aperture and psf photometry and run allstar

    dp = Daophot(image=kwargs['image'])
    dp.PHotometry(stars=kwargs['coo'])
    dp.PSf(psf_stars=kwargs['lst'])
    al = Allstar(dir=dp.dir)
    al.ALlstar()
    all_s = al.ALlstars_result
    print(sigmaclip(all_s.chi)[0].mean())
    all_s.hist('chi')
Example #38
def plot_standard(corr="acorr"):
    os.chdir(tables_dir)
    ref = np.loadtxt("stars_lick_val_{0}.txt".format(corr)).T
    obs = np.loadtxt("stars_lick_obs_{0}.txt".format(corr)).T
    bands = np.loadtxt("bands_matching_standards.txt", usecols=(0), dtype=str).tolist()
    bands2, units, error = np.loadtxt("bands.txt", usecols=(0,9,10), dtype=str).T
    idx = [list(bands2).index(x) for x in bands]
    idx2 = np.array([list(bands).index(x) for x in bands2])
    error = error[idx]
    units = units[idx]
    units = [x.replace("Ang", "\AA") for x in units]
    fig = plt.figure(1, figsize=(20,12))
    gs = GridSpec(5,5)
    gs.update(left=0.03, right=0.988, top=0.98, bottom=0.06, wspace=0.2,
              hspace=0.4)
    offsets, errs = [], []
    for i in range(25):
        ax = plt.subplot(gs[i])
        plt.locator_params(axis="y", nbins=6)
        plt.locator_params(axis="x", nbins=6)
        ax.minorticks_on()
        # ax.plot(obs[i], ref[i] - obs[i], "ok")
        ax.axhline(y=0, ls="--", c="k")
        diff = ref[i] - obs[i]
        diff, c1, c2 = sigmaclip(diff[np.isfinite(diff)], 2.5, 2.5)
        ax.hist(diff, bins=8, color="0.7", histtype='stepfilled')
        ylim = plt.ylim()
        xlim = plt.xlim()
        xlim = np.max(np.abs(xlim))
        ax.set_ylim(0, ylim[1] + 2)
        ax.set_xlim(-xlim, xlim)
        mean = np.nanmean(diff)
        N = len(diff)
        err = np.nanstd(diff) / np.sqrt(N)
        lab = "${0:.2f}\pm{1:.2f}$".format(mean, err)
        ax.axvline(x=mean, ls="-", c="r", label=lab)
        ax.axvline(x=0, ls="--", c="k")
        # ax.axhline(y=float(error[i]))
        # ax.axhline(y=-float(error[i]))
        # ax.set_xlabel("{0} ({1})".format(bands[i].replace("_", " "), units[i]))
        ax.legend(loc=1,prop={'size':12})
        ax.set_xlabel("$\Delta$ {0} ({1})".format(bands[i].replace("_", " "),
                                                  units[i]))
        ax.set_ylabel("Frequency")
        offsets.append(mean)
        errs.append(err)
    offsets = np.array(offsets)[idx2]
    errs = np.array(errs)[idx2]
    output = os.path.join(home, "plots/lick_stars_{0}.png".format(corr))
    plt.savefig(output)
    with open(os.path.join(tables_dir, "lick_offsets.txt"), "w") as f:
        f.write("# Index Additive Correction\n")
        np.savetxt(f, np.column_stack((np.array(bands)[idx2],offsets, errs)),
                   fmt="%s")
Example #39
    def clipping(self):
        mask = np.ones_like(self.mag, dtype=bool)
        if self.sigma_clip:
            for rlo, rhi in self.mag_bin_idx_ranges:
                errors = self.err_sorted[rlo:rhi]
                if len(errors) > 2:  # no sigmaclip for 0-, 1- or 2-element sets
                    _, elo, ehi = sigmaclip(errors)
                    mask[rlo:rhi] = (elo <= errors) & (errors <= ehi)
        return mask
Example #40
def test_compare_to_scipy_sigmaclip():
    # need to seed the numpy RNG to make sure we don't get some
    # amazingly flukey random number that breaks one of the tests

    with NumpyRNGContext(12345):

        randvar = randn(10000)

        astropyres = sigma_clip(randvar, sigma=3, iters=None, cenfunc=np.mean)
        scipyres = stats.sigmaclip(randvar, 3, 3)[0]

        assert astropyres.count() == len(scipyres)
        assert_equal(astropyres[~astropyres.mask].data, scipyres)
Example #41
def avg_compo(sample):

    """param:
    table: table file to read data from to create composite
    sample: name of the sample: main, mixed, bal"""
    
    if sample == "main":
        sample_name= ""
        comments= "Main sample, nonBALs with only EW >0"
    
    elif sample == "mixed":
        sample_name= "_mixed"
        comments= "Mixed sample, BAL and nonBAL"
    
    elif sample == "bal":
        sample_name= "_bal"
        comments= "BAL quasars only"

    table_name= "sample"+sample_name+"_myflags.fits"
    tab= Table.read(table_name)
    t= tab[tab['MY_FLAG'] ==0 ]
   
    compo_array= np.arange(1100, 4000, 0.5)

    for i in range(len(t)):
        spec_name="./new_proc_data/spec-"+str(t['PLATE'][i])+"-"+str(t['MJD'][i])+"-"+str(t['FIBERID'][i]).zfill(4)+"_proc.fits"
        spec=fits.open(spec_name)
        flx= spec[0].data[1]
        wlen= spec[0].data[0]
        norm_flx= flx/np.median(flx[2360:2390]) # normalize spectra
        compo_array= np.vstack((compo_array, norm_flx)) # 2D array. 1st row: restframe wavelength, other rows have corrected fluxes of spectra from clusters (one for each row)
        del spec

    n = len(compo_array[1:])   # first row is the wavelength grid
    print("composite has", n, "spectra")

    clipped_compo=[]
    for j in range(compo_array.shape[1]):
        
        y = sigmaclip(compo_array[1:, j], 3, 3)
        m = np.median(y[0])
        clipped_compo.append(m)

    avg_compo_name= "./composites/mean_compo"+sample_name+".fits"
    spec_file= np.vstack((wlen,clipped_compo))
    hdu= fits.PrimaryHDU(spec_file)
    hdr= hdu.header
    hdr.set('SPEC_NUMBER', n)
    hdr.set('COMPOSITE', comments)
    hdu.writeto(avg_compo_name)
Example #42
def test_compare_to_scipy_sigmaclip():
    from numpy.random import randn

    # need to seed the numpy RNG to make sure we don't get some amazingly flukey
    # random number that breaks one of the tests

    with NumpyRNGContext(12345):  # Amazing, I've got the same combination on my luggage!

        randvar = randn(10000)

        astropyres = funcs.sigma_clip(randvar, 3, None, np.mean)[0]
        scipyres = stats.sigmaclip(randvar, 3, 3)[0]

        assert_equal(astropyres, scipyres)
Example #43
def iter_clip(x, width=100, sigma=3, norm=False, return_idx=False):
    x = np.array(x, copy=True)
    n = x.size
    msk = np.repeat(False, n)
    for i in range(0, n - width):
        sub = x[i : i + width]
        c, l, u = stats.sigmaclip(sub, sigma, sigma)
        idx = (sub < l) | (sub > u)
        msk[i : i + width][idx] = True
    if return_idx:
        return msk
    x[msk] = np.nan
    if norm:
        return x / np.median(x[~np.isnan(x)])
    return x
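A short demonstration of the windowed clipper above (a sketch, assuming numpy as np and scipy.stats as stats, matching the function body):

import numpy as np
from scipy import stats

rng = np.random.default_rng(7)
x = np.sin(np.linspace(0, 10, 1000)) + rng.normal(0.0, 0.01, 1000)
x[500] += 1.0    # one transient spike on a smooth trend
msk = iter_clip(x, width=100, sigma=4, return_idx=True)
print(np.where(msk)[0])    # should flag index 500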
Example #44
def Sigmaclip(array, low=4., high=4., axis=None):
    '''Iterative sigma clipping along the given axis.

    The output contains only those elements of the input array `c`
    that satisfy ::

        mean(c) - std(c)*low < c < mean(c) + std(c)*high

    Parameters
    ----------
    array : array_like
        data array
    low : float
        lower bound factor of sigma clipping
    high : float
        upper bound factor of sigma clipping
    axis : int, optional
        axis along which to clip; None clips the flattened array

    Returns
    -------
    mean, std : float or ndarray
        sigma-clipped mean and standard deviation (along `axis` if given)
    '''
    c = np.asarray(array)
    if axis is None or c.ndim == 1:
        out = sigmaclip(c, low, high)[0]
        return out.mean(), out.std()
    # create masked array
    c_mask = np.ma.masked_array(c, np.isnan(c))
    delta = 1
    while delta:
        c_std = c_mask.std(axis=axis)
        c_mean = c_mask.mean(axis=axis)
        size = c_mask.mask.sum()
        critlower = c_mean - c_std * low
        critupper = c_mean + c_std * high
        indexer = [slice(None)] * c.ndim
        for i in range(c.shape[axis]):
            indexer[axis] = slice(i, i + 1)
            c_mask[tuple(indexer)].mask = np.logical_and(
                c_mask[tuple(indexer)].squeeze() > critlower,
                c_mask[tuple(indexer)].squeeze() < critupper) == False
        delta = size - c_mask.mask.sum()
    return c_mask.mean(axis).data, c_mask.std(axis).data
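Exercising the 1-D branch of Sigmaclip above (hedged sketch; numpy as np and scipy.stats.sigmaclip are assumed imported by the surrounding module):

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(8)
a = rng.normal(5.0, 0.5, 10000)
a[::1000] = 100.0     # sprinkle in ten gross outliers
print(Sigmaclip(a))   # clipped (mean, std), near (5.0, 0.5)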
Example #45
def checkCube(cubename, showlamrms=False, savefig=False):
    ''' Plot a datacube for checking'''
    
    cc = np.load(cubename)

    Xs = [c.X_as for c in cc]
    Ys = [c.Y_as for c in cc]
    if showlamrms:
        Ss = [0.] * len(cc)
        for i in range(len(cc)):
            if cc[i].lamrms is not None:
                if np.isfinite(cc[i].lamrms):
                    Ss[i] = cc[i].lamrms

        c, low, upp = sigmaclip(Ss)
        Smdn = np.median(c)
        Sstd = np.nanstd(c)
        print "Nspax: %d, Nclip: %d, <RMS>: %f, RMS(std): %f" % (len(cc), (len(cc)-len(c)), Smdn, Sstd)
        smx = Smdn + 3.* Sstd
        smn = Smdn - 3.* Sstd
        if smn < 0.: smn = 0.
        cbtitle = "Wavelength RMS [nm]"
        outf = "cube_lambdarms.pdf"
    else:
        Ss = [c.trace_sigma for c in cc]
        smx = 2
        smn = 0.8
        cbtitle = "RMS trace width [pix]"
        outf = "cube_trace_sigma.pdf"

    pl.figure(1)
    pl.scatter(Xs, Ys, marker='H', linewidth=0, s=50, c=Ss, vmin=smn, vmax=smx)
    pl.title("Hexagonal Grid of Cube Positions")
    pl.xlim(-25,25)
    pl.ylim(-25,25)

    pl.colorbar(label=cbtitle)
    pl.xlabel("X position [as]")
    pl.ylabel("Y position [as]")
    pl.grid(True)
    pl.ioff()
    if savefig:
        pl.savefig(outf)
        print "Figure saved to "+outf
    else:
        pl.show()
Example #46
    def sigma_clip(self, data, dq, low=3.0, high=3.0):
        """Wrap the scipy.stats.sigmaclip so that data with zero variance
        is handled cleanly

        Parameters:
        -----------

        data: NDArray
            Array of pixels to be sigma-clipped

        dq: NDArray
            DQ array for data

        low: float
            lower clipping boundary, in standard deviations from the mean (default=3.0)

        high: float
            upper clipping boundary, in standard deviations from the mean (default=3.0)

        Returns:
        --------

        mean: float
            clipped mean of data array

        """

        #
        # Only calculate the clipped mean for pixels that don't have the DO_NOT_USE
        # DQ bit set
        goodpixels = np.where(np.bitwise_and(dq, dqflags.pixel['DO_NOT_USE'])==0)
        #
        # scipy routine fails if the pixels all have exactly the same value
        if np.std(data[goodpixels],dtype=np.float64) != 0.0:
            clipped_ref, lowlim, uplim = stats.sigmaclip(data[goodpixels],
                                                         low, high)
            mean = clipped_ref.mean()
        else:
            mean = data[goodpixels].mean(dtype=np.float64)
        return mean
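A sketch of the zero-variance guard above; the DO_NOT_USE flag value (1) and the constant frame are assumptions for illustration, and the call is commented out because sigma_clip is a method of its host step class:

import numpy as np

data = np.full(100, 3.5)                 # constant frame: scipy's sigmaclip would fail
dq = np.zeros(100, dtype=np.uint32)
dq[:10] = 1                              # assume DO_NOT_USE == 1 for this sketch
# mean = step.sigma_clip(data, dq)       # -> 3.5 via the plain-mean fallback branch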
Example #47
0
def FWHM_ave(data,clip=False,sigmas=False):
    """
    Description:
    ------------    
    Computes the average FWHM for each entry.
    Expects a data dictionary in the format returned by "get_data".

    """

    raw = data['FWHMraw']
    
    sz = len(raw)
    FWHM = np.ones(sz)* np.nan
    sigma = np.ones(sz)* np.nan
    for i in range(sz):
        #remove empty strings from data before processing
        if '' in raw[i]:
            raw[i].remove('')
            
        #safeguard against weird string formatting errors
        try:
            vals = np.array(raw[i]).astype('float')
        except ValueError:
            vals = [0]
        
        
        if clip:
            newvals, low, high = sigmaclip(vals,low=clip,high=clip)
            FWHM[i] = np.mean(newvals)
            sigma[i] = np.std(newvals)
        else:
            FWHM[i] = np.mean(vals)
            sigma[i] = np.std(vals)

    FWHM = np.nan_to_num(FWHM)
    if sigmas:
        return [FWHM,sigma]
    else:
        return FWHM
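An illustrative call with a hypothetical data dict mirroring the layout get_data is said to return; note that clip is forwarded as both the low and high bound of sigmaclip:

data = {'FWHMraw': [['1.9', '2.1', '2.0', '9.0', ''], ['2.2', '2.3']]}
fwhm, sig = FWHM_ave(data, clip=3.0, sigmas=True)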
Example #48
0
def _isigclip(i, istack):
    mn = []
    for col in istack:
        clipped, bot, top = sigmaclip(col, low=3, high=3)
        mn.append(clipped.mean())
    return np.array(mn)
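A sketch of the intended input shape (an assumption read off the loop structure): each element of istack holds repeated samples of one pixel, and the i argument is only worker bookkeeping:

import numpy as np
from scipy.stats import sigmaclip

stack = np.random.normal(100., 5., size=(32, 10))  # 32 pixels x 10 frames each
means = _isigclip(0, stack)                        # one 3-sigma-clipped mean per pixel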
Example #49
0
def getGlobalSky(imgArr,
                 mskAll,
                 skyClip=3,
                 zp=27.0,
                 pix=0.168,
                 rebin=4,
                 prefix='coadd_sky',
                 suffix='global_',
                 verbose=True,
                 visual=True,
                 nClip=2):
    """
    Estimate the Global Sky.

    Estimating the global sky background level by using the mean
    of a rebined image

    This could also be used to estimate the expect surface brightness
    limit of the image

    """
    # Estimate the global background level
    dimX, dimY = imgArr.shape

    # Pixel values of all pixels that are not masked out (before rebinned)
    pixels = imgArr[mskAll == 0].flatten()
    pixels = pixels[np.isfinite(pixels)]
    try:
        pixNoMsk, low3, upp3 = sigmaclip(pixels, low=skyClip, high=skyClip)
    except Exception:
        warnings.warn("\n### sigmaclip failed for original image!")
        pixNoMsk = pixels
        del pixels

    try:
        # Rebin image
        dimBinX = int((dimX - 1) / rebin)
        dimBinY = int((dimY - 1) / rebin)
        imgBin = hUtil.congrid(imgArr, (dimBinX, dimBinY), method='nearest')
        mskBin = hUtil.congrid(mskAll, (dimBinX, dimBinY), method='neighbour')
    except Exception:
        warnings.warn('### congrid failed!')
        print("\n###    Image rebin is failed for this galaxy !!!")
        imgBin = imgArr
        mskBin = mskAll

    # Get all the pixels that are not masked out
    pixels = imgBin[mskBin == 0].flatten()
    pixels = pixels[np.isfinite(pixels)]
    try:
        pixNoMskBin, low4, upp4 = sigmaclip(pixels, low=skyClip, high=skyClip)
    except Exception:
        warnings.warn("### sigmaclip failed for binned image!")
        pixNoMskBin = pixels

    numSkyPix = len(pixNoMskBin)
    # Get the basic statistics of the global sky
    skyAvg, skyStd = np.nanmean(pixNoMskBin), np.nanstd(pixNoMskBin)
    if not np.isfinite(skyAvg) or not np.isfinite(skyStd):
        warnings.warn("\n###    No useful global skyAvg / Std for %s" % prefix)
    skyMed = np.nanmedian(pixNoMskBin)
    if not np.isfinite(skyMed):
        warnings.warn("\n###    No useful global skyMed for %s" % prefix)
        skyMed = skyAvg if np.isfinite(skyAvg) else 0.00

    skySkw = scipy.stats.skew(pixNoMskBin)
    sbExpt = cdPrep.getSbpValue(3.0 * skyStd, pix * rebin, pix * rebin, zp=zp)

    if not np.isfinite(sbExpt):
        warnings.warn("\n###    No useful global sbExpt for %s" % prefix)
    if verbose:
        print("###    Median / Mean / Std / Skew / SBP: " +
              " %8.5f / %8.5f / %8.5f / %8.5f / %5.2f" % (skyMed, skyAvg,
                                                          skyStd, skySkw,
                                                          sbExpt))

    if visual:
        skyPNG = prefix + '_' + suffix + 'skyhist.png'
        showSkyHist(
            pixNoMskBin,
            skypix2=pixNoMsk,
            sbExpt=sbExpt,
            pngName=skyPNG,
            skyAvg=skyAvg,
            skyMed=skyMed,
            skyStd=skyStd,
            skySkw=skySkw)

    """Save a txt file summary"""
    skyTxt = prefix + '_' + suffix + 'sky.dat'
    text_file = open(skyTxt, "w")
    text_file.write("IMAGE: %s \n" % prefix)
    text_file.write("REBIN: %3d \n" % rebin)
    text_file.write("NSKYPIX: %10d \n" % numSkyPix)
    text_file.write("SKYMED: %10.6f \n" % skyMed)
    text_file.write("SKYAVG: %10.6f \n" % skyAvg)
    text_file.write("SKYSTD: %10.6f \n" % skyStd)
    text_file.write("SKYSKW: %10.6f \n" % skySkw)
    text_file.write("SBEXPT: %10.6f \n" % sbExpt)
    text_file.close()

    return numSkyPix, skyMed, skyAvg, skyStd, skySkw, sbExpt
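A hedged usage sketch; hUtil.congrid, cdPrep.getSbpValue, and showSkyHist come from the host package, so the call is left commented and only the inputs are illustrated:

import numpy as np

img = np.random.normal(0.01, 0.003, size=(512, 512))   # synthetic sky-dominated image
msk = np.zeros_like(img, dtype=int)                    # 0 = unmasked everywhere
# results = getGlobalSky(img, msk, skyClip=3, rebin=4, verbose=False, visual=False)
# results -> (numSkyPix, skyMed, skyAvg, skyStd, skySkw, sbExpt)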
Example #50
0
def aper(image,xc,yc, phpadu=1, apr=5, zeropoint=25,
         skyrad=[40,50], badpix=[0,0], setskyval = None, minsky=[],
         skyalgorithm='mmm', exact = False, readnoise = 0,
         verbose=True, debug=False):
    """ Compute concentric aperture photometry on one ore more stars
    (adapted for IDL from DAOPHOT, then translated from IDL to Python).

    APER can compute photometry in several user-specified aperture radii.
    A separate sky value is computed for each source using specified inner
    and outer sky radii.

    By default, APER uses a magnitude system where a magnitude of
    25 corresponds to 1 flux unit. APER returns both
    fluxes and magnitudes.

     REQUIRED INPUTS:
         image  -  input image array
         xc     - scalar x value or 1D array of x coordinates.
         yc     - scalar y value or 1D array of y coordinates

     OPTIONAL KEYWORD INPUTS:
         phpadu - Photons per Analog Digital Units, numeric scalar.  Converts
                   the data numbers in IMAGE to photon units.  (APER assumes
                   Poisson statistics.)
         apr    - scalar or 1D array of photometry aperture radii in pixel units.
         zeropoint - zero point for converting flux (in ADU) to magnitudes
         skyrad - Two element list giving the inner and outer radii
                   to be used for the sky annulus
         badpix - Two element list giving the minimum and maximum value
                   of a good pix. If BADPIX[0] is equal to BADPIX[1] then
                   it is assumed that there are no bad pixels.

         exact -  By default, APER counts subpixels, but uses a polygon
                  approximation for the intersection of a circular aperture with
                  a square pixel (and normalizes the total area of the summed
                  pixels to exactly match the circular area).   If the /EXACT
                  keyword is set, then the intersection of the circular aperture
                  with a square pixel is computed exactly.    The /EXACT keyword
                  is much slower and is only needed when small (~2 pixel)
                  apertures are used with very undersampled data.

         print - if set and non-zero then APER will also write its results to
                   a file aper.prt.   One can specify the output file name by
                   setting PRINT = 'filename'.
         verbose -  Print warnings, status, and ancillary info to the terminal
         setskyval - Use this keyword to force the sky to a specified value
                   rather than have APER compute a sky value.    SETSKYVAL
                   can either be a scalar specifying the sky value to use for
                   all sources, or a 3 element vector specifying the sky value,
                   the sigma of the sky value, and the number of elements used
                   to compute a sky value.   The 3 element form of SETSKYVAL
                   is needed for accurate error budgeting.
         skyalgorithm - set the algorithm by which the sky value is determined
                  Valid options are 'sigmaclipping' or 'mmm'.

     RETURNS:
         mags   -  NAPER by NSTAR array giving the magnitude for each star in
                   each aperture.  (NAPER is the number of apertures, and NSTAR
                   is the number of stars).   A flux of 1 digital unit is assigned
                   a zero point magnitude of 25.
         magerr  -  NAPER by NSTAR array giving error in magnitude
                   for each star.  If a magnitude could not be deter-
                   mined then ERRAP = 9.99.
         flux    -  NAPER by NSTAR array giving fluxes
         fluxerr -  NAPER by NSTAR array giving error in each flux
         sky  -    NSTAR element array giving sky value for each star
         skyerr -  NSTAR element array giving error in sky values
         outstr  - string for each star and aperture reporting the mag and err

     PROCEDURES USED:
           MMM, PIXWT()
     NOTES:
           Reasons that a valid magnitude cannot be computed include the following:
          (1) Star position is too close (within 0.5 pixels) to edge of the frame
          (2) Less than 20 valid pixels available for computing sky
          (3) Modal value of sky could not be computed by the procedure MMM
          (4) *Any* pixel within the aperture radius is a "bad" pixel

           APER was modified in June 2000 in two ways: (1) the /EXACT keyword was
           added (2) the approximation of the intersection of a circular aperture
           with square pixels was improved (i.e. when /EXACT is not used)
     REVISION HISTORY:
           Adapted to IDL from DAOPHOT June, 1989   B. Pfarr, STX
           Adapted for IDL Version 2,               J. Isensee, July, 1990
           Code, documentation spiffed up           W. Landsman   August 1991
           TEXTOUT may be a string                  W. Landsman September 1995
           FLUX keyword added                       J. E. Hollis, February, 1996
           SETSKYVAL keyword, increase maxsky       W. Landsman, May 1997
           Work for more than 32767 stars           W. Landsman, August 1997
           Converted to IDL V5.0                    W. Landsman   September 1997
           Don't abort for insufficient sky pixels  W. Landsman  May 2000
           Added /EXACT keyword                     W. Landsman  June 2000
           Allow SETSKYVAL = 0                      W. Landsman  December 2000
           Set BADPIX[0] = BADPIX[1] to ignore bad pixels W. L.  January 2001
           Fix chk_badpixel problem introduced Jan 01 C. Ishida/W.L. February 2001
           Converted from IDL to python             D. Jones January 2014
           Adapted for hstphot project              S. Rodney  July 2014
    """

    if verbose > 1:
        import time
        tstart = time.time()
    elif verbose: import time

    if debug :
        import pdb
        pdb.set_trace()

    # Force np arrays
    if not np.iterable( xc ):
        xc = np.array([xc])
        yc = np.array([yc])
    assert len(xc) == len(yc), 'xc and yc arrays must be identical length.'

    if not np.iterable( apr ) :
        apr = np.array( [ apr ] )
    Naper = len( apr ) # Number of apertures
    Nstars = len( xc )   # Number of stars to measure

    # Set parameter limits
    if len(minsky) == 0: minsky = 20

    # Number of columns and rows in image array
    s = np.shape(image)
    ncol = s[1]
    nrow = s[0]


    if setskyval is not None :
        if not np.iterable(setskyval) :
            setskyval = [setskyval,0.,1.]
        assert len(setskyval)==3, 'Keyword SETSKYVAL must contain 1 or 3 elements'
        skyrad = [ 0., np.max(apr) + 1]    #use np.max (max function does not work on scalars)
    skyrad = asfarray(skyrad)




    # String array to display mags for all apertures in one line for each star
    outstr = [ '' for star in range(Nstars)]

    # Declare arrays
    mag = zeros( [ Nstars, Naper])
    magerr =  zeros( [ Nstars, Naper])
    flux = zeros( [ Nstars, Naper])
    fluxerr =  zeros( [ Nstars, Naper])
    badflag = zeros( [ Nstars, Naper])
    sky = zeros( Nstars )
    skyerr = zeros( Nstars )
    area = np.pi*apr*apr  # Area of each aperture

    if exact:
        bigrad = apr + 0.5
        smallrad = apr/np.sqrt(2) - 0.5 

    if setskyval is None :
        rinsq =  skyrad[0]**2
        routsq = skyrad[1]**2

    #  Compute the limits of the submatrix.   Do all stars in vector notation.
    lx = (xc-skyrad[1]).astype(int)  # Lower limit X direction
    ly = (yc-skyrad[1]).astype(int)  # Lower limit Y direction
    ux = (xc+skyrad[1]).astype(int)  # Upper limit X direction
    uy = (yc+skyrad[1]).astype(int)  # Upper limit Y direction

    lx[where(lx < 0)[0]] = 0
    ux[where(ux > ncol-1)[0]] = ncol-1
    nx = ux-lx+1                         # Number of pixels X direction

    ly[where(ly < 0)[0]] = 0
    uy[where(uy > nrow-1)[0]] = nrow-1
    ny = uy-ly +1                      # Number of pixels Y direction

    dx = xc-lx                         # X coordinate of star's centroid in subarray
    dy = yc-ly                         # Y coordinate of star's centroid in subarray

    # Find the edge of the subarray that is closest to each star
    # and then flag any stars that are too close to the edge or off-image
    edge = zeros(len(dx))
    for i in range(len(dx)):
        edge[i] = min([(dx[i]-0.5), (nx[i]+0.5-dx[i]), (dy[i]-0.5), (ny[i]+0.5-dy[i])])
    badstar = np.where( (xc<0.5) | (xc>ncol-1.5) |
                        (yc<0.5) | (yc>nrow-1.5), 1, 0 )
    if np.any( badstar ) :
        nbad = badstar.sum()
        print('WARNING [aper.py] - ' + str(nbad) + ' star positions outside image')

    if verbose :
        tloop = time.time()
    for i in range(Nstars):  # Compute magnitudes for each star
        while True :
            # mimic GOTO statements : break out of this while block whenever
            # we decide this star is bad
            apflux = asarray([np.nan]*Naper)
            apfluxerr = asarray([np.nan]*Naper)
            apmag = asarray([np.nan]*Naper)
            apmagerr = asarray([np.nan]*Naper)
            skymod = 0.  # Sky mode
            skysig = 0.  # Sky sigma
            skyskw = 0.  # Sky skew
            error1 = asarray([np.nan]*Naper)
            error2 = asarray([np.nan]*Naper)
            error3 = array([np.nan]*Naper)
            apbad = np.ones( Naper )
            if badstar[i]: # star is bad, return NaNs for all values
                break

            rotbuf = image[ ly[i]:uy[i]+1,lx[i]:ux[i]+1 ] #Extract subarray from image
            shapey,shapex = np.shape(rotbuf)[0],np.shape(rotbuf)[1]
            #  RSQ will be an array, the same size as ROTBUF containing the square of
            #      the distance of each pixel to the center pixel.

            dxsq = ( arange( nx[i] ) - dx[i] )**2
            rsq = np.ones( [ny[i], nx[i]] )
            for ii  in range(ny[i]):
                rsq[ii,:] = dxsq + (ii-dy[i])**2

            if exact:
                nbox = np.arange(nx[i]*ny[i])
                xx = (nbox % nx[i]).reshape(ny[i], nx[i])
                yy = (nbox // nx[i]).reshape(ny[i], nx[i])
                x1 = np.abs(xx - dx[i])
                y1 = np.abs(yy - dy[i])
            else:
                r = np.sqrt(rsq) - 0.5    #2-d array of the radius of each pixel in the subarray

            rsq,rotbuf = rsq.reshape(shapey*shapex),rotbuf.reshape(shapey*shapex)
            if setskyval is None :
                # skypix will be 1-d array of sky pixels
                skypix = np.zeros( rsq.shape )

                #  Select pixels within sky annulus,
                skypix[where(( rsq >= rinsq ) &
                             ( rsq <= routsq ))[0]] = 1
                if badpix[0]!=badpix[1] :
                    # Eliminate pixels above or below the badpix threshold vals
                    skypix[where(((rotbuf < badpix[0]) | (rotbuf > badpix[1])) &
                                 (skypix == 1))[0]] = 0
                sindex =  where(skypix)[0]
                nsky = len(sindex)

                if ( nsky < minsky ):   # Insufficient sky pixels?
                    if verbose:
                        print("ERROR: nsky=%i is fewer than minimum %i valid pixels in the sky annulus."%(nsky,minsky))
                    break

                skybuf = rotbuf[ sindex[0:nsky] ]
                if skyalgorithm.startswith('sigmaclip'):
                    # The sky annulus is (nearly) empty of stars, (as in a diff image)
                    # so we can simply compute the sigma-clipped mean of all pixels in
                    # the annulus
                    skybufclipped,lothresh,hithresh = sigmaclip( skybuf, low=4.0, high=4.0)
                    skymod = np.mean( skybufclipped )
                    skysig = np.std( skybufclipped )
                    skyskw = skew( skybufclipped )

                else:
                    # Compute the sky mode, sigma and skewness using the
                    # mean/median/mode algorithm in mmm.py, which assumes that
                    # most of the outlier pixels are positive.
                    skymod, skysig, skyskw = mmm.mmm(skybuf,readnoise=readnoise,minsky=minsky)

                skyvar = skysig**2    #Variance of the sky brightness
                sigsq = skyvar/nsky  #Square of standard error of mean sky brightness
             
                if ( skysig < 0.0 ):
                    # If the modal sky value could not be determined, then all
                    # apertures for this star are bad.  So skip to the next.
                    break

                if skysig > 999.99: skysig = 999      #Don't overload output formats
                if skyskw < -99: skyskw = -99
                if skyskw > 999.9: skyskw = 999.9

            else:
                skymod = setskyval[0]
                skysig = setskyval[1]
                nsky = setskyval[2]
                skyvar = skysig**2
                sigsq = skyvar/nsky
                skyskw = 0

            for k in range(Naper): # Find pixels within each aperture
                if ( edge[i] >= apr[k] ):   #Does aperture extend outside the image?
                    if exact:
                        mask = zeros(ny[i]*nx[i])

                        x1,y1 = x1.reshape(ny[i]*nx[i]),y1.reshape(ny[i]*nx[i])
                        igoodmag = where( ( x1 < smallrad[k] ) & (y1 < smallrad[k] ))[-1]
                        Ngoodmag = len(igoodmag)
                        if Ngoodmag > 0: mask[igoodmag] = 1
                        bad = where(  (x1 > bigrad[k]) | (y1 > bigrad[k] ))[-1]
                        mask[bad] = -1

                        gfract = where(mask == 0.0)[0] 
                        Nfract = len(gfract)
                        if Nfract > 0:
                            yygfract = yy.reshape(ny[i]*nx[i])[gfract]
                            xxgfract = xx.reshape(ny[i]*nx[i])[gfract] 

                            mask[gfract] = pixwt.Pixwt(dx[i],dy[i],apr[k],xxgfract,yygfract)
                            mask[gfract[where(mask[gfract] < 0.0)[0]]] = 0.0
                        thisap = where(mask > 0.0)[0]

                        thisapd = rotbuf[thisap]
                        fractn = mask[thisap]
                    else:
                        # approximating the circular aperture shape
                        rshapey,rshapex = np.shape(r)[0],np.shape(r)[1]
                        thisap = where( r.reshape(rshapey*rshapex) < apr[k] )[0]   # Select pixels within radius
                        thisapd = rotbuf.reshape(rshapey*rshapex)[thisap]
                        thisapr = r.reshape(rshapey*rshapex)[thisap]
                        fractn = apr[k]-thisapr 
                        fractn[where(fractn > 1)[0]] = 1
                        fractn[where(fractn < 0)[0]] = 0  # Fraction of pixels to count
                        full = zeros(len(fractn))
                        full[where(fractn == 1)[0]] = 1.0
                        gfull = where(full)[0]
                        Nfull = len(gfull)
                        gfract = where(1 - full)[0]
                        factor = (area[k] - Nfull ) / np.sum(fractn[gfract])
                        fractn[gfract] = fractn[gfract]*factor
                else:
                    if verbose :
                        print("WARNING [aper.py]: aperture extends outside the image!")
                    continue
                    # END "if exact ...  else ..."

                # Check for any bad pixel values (nan,inf) and those outside
                # the user-specified range of valid pixel values.  If any
                # are found in the aperture, raise the badflux flag.
                apbad[k] = 0
                if not np.all( np.isfinite(thisapd) ) :
                    if verbose :
                        print("WARNING : nan or inf pixels detected in aperture.\n"
                              "We're setting these to 0, but the photometry"
                              "may be biased.")
                    thisapd[np.isfinite(thisapd)==False] = 0
                    apbad[k] = 1
                    fractn = 0
                if badpix[0] < badpix[1] :
                    ibadpix = np.where((thisapd<=badpix[0]) | (thisapd>=badpix[1]))
                    if len(ibadpix[0]) > 0 :
                        if verbose :
                            print("WARNING : pixel values detected in aperture"
                                  " that are outside of the allowed range "
                                  " [%.1f , %.1f] \n"%(badpix[0],badpix[1]) +
                                  "We're treating these as 0, but the "
                                  "photometry may be biased.")
                        thisapd[ibadpix] = 0
                        apbad[k] = 1
                # Sum the flux over the irregular aperture
                apflux[k] = np.sum(thisapd*fractn)
            # END for loop over apertures

            igoodflux = where(np.isfinite(apflux))[0]
            Ngoodflux = len(igoodflux)
            if Ngoodflux > 0:
                if verbose > 2 :
                    print(" SRCFLUX   APFLUX    SKYMOD   AREA")
                    for igf in igoodflux :
                        print("%.4f   %.4f   %.4f   %.4f "%(apflux[igf]-skymod*area[igf],apflux[igf],skymod,area[igf]))
                # Subtract sky from the integrated brightnesses
                apflux[igoodflux] = apflux[igoodflux] - skymod*area[igoodflux]

            # Compute flux error
            error1[igoodflux] = area[igoodflux]*skyvar   #Scatter in sky values
            error2[igoodflux] = np.abs(apflux[igoodflux])/phpadu  #Random photon noise
            error3[igoodflux] = sigsq*area[igoodflux]**2  #Uncertainty in mean sky brightness
            apfluxerr[igoodflux] = np.sqrt(error1[igoodflux] + error2[igoodflux] + error3[igoodflux])

            igoodmag = where (apflux > 0.0)[0]  # Are there any valid integrated fluxes?
            Ngoodmag = len(igoodmag)
            if ( Ngoodmag > 0 ) : # convert valid fluxes to mags
                apmagerr[igoodmag] = 1.0857*apfluxerr[igoodmag]/apflux[igoodmag]   # 1.0857 = 2.5/ln(10)
                apmag[igoodmag] =  zeropoint-2.5*np.log10(apflux[igoodmag])
            break # Closing the 'while True' loop.

        # TODO : make a more informative output string
        outstr[i] = '%.3f,%.3f :'%(xc[i],yc[i]) + \
                    '  '.join( [ '%.4f+-%.4f'%(apmag[ii],apmagerr[ii])
                                 for ii in range(Naper) ] )

        sky[i] = skymod
        skyerr[i] = skysig
        mag[i,:] = apmag
        magerr[i,:]= apmagerr
        flux[i,:] = apflux
        fluxerr[i,:]= apfluxerr
        badflag[i,:] = apbad

    if Nstars == 1 :
        sky = sky[0]
        skyerr = skyerr[0]
        mag = mag[0]
        magerr = magerr[0]
        flux = flux[0]
        fluxerr = fluxerr[0]
        badflag = badflag[0]
        outstr = outstr[0]

    if verbose>1:
        print('hstphot.aper took %.3f seconds'%(time.time()-tstart))
        print('Each of %i loops took %.3f seconds'%(Nstars,(time.time()-tloop)/Nstars))

    return(mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr)
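A minimal call sketch on a synthetic frame; the star position, aperture, and sky radii are illustrative, and skyalgorithm='sigmaclipping' selects the sigma-clipped sky branch above (the mmm path is never touched):

import numpy as np

img = np.random.normal(100., 3., size=(200, 200))
img[95:106, 95:106] += 500.                       # a fake star near (100, 100)
mag, magerr, flux, fluxerr, sky, skyerr, badflag, outstr = aper(
    img, 100., 100., phpadu=1., apr=5., zeropoint=25.,
    skyrad=[40, 50], skyalgorithm='sigmaclipping', verbose=False)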
Example #51
0
def apphot_ps1stars(ccd, ps,
                    apertures,
                    decals,
                    sky_inner_r=40,
                    sky_outer_r=50):
    im = decals.get_image_object(ccd)

    tim = im.get_tractor_image(gaussPsf=True, splinesky=True)
    img = tim.getImage()

    wcs = tim.subwcs
    
    magrange = (15,21)
    ps1 = ps1cat(ccdwcs=wcs)
    ps1 = ps1.get_stars(magrange=magrange)
    print 'Got', len(ps1), 'PS1 stars'
    band = ccd.filter
    piband = ps1cat.ps1band[band]
    print 'band:', band

    ps1.cut(ps1.nmag_ok[:,piband] > 0)
    print 'Keeping', len(ps1), 'stars with nmag_ok'
    
    ok,x,y = wcs.radec2pixelxy(ps1.ra, ps1.dec)
    apxy = np.vstack((x - 1., y - 1.)).T

    ap = []
    aperr = []
    nmasked = []
    with np.errstate(divide='ignore'):
        ie = tim.getInvError()
        imsigma = 1. / ie
        imsigma[ie == 0] = 0
    mask = (imsigma == 0)
    for rad in apertures:
        aper = photutils.CircularAperture(apxy, rad)
        p = photutils.aperture_photometry(img, aper, error=imsigma, mask=mask)
        aperr.append(p.field('aperture_sum_err'))
        ap.append(p.field('aperture_sum'))
        p = photutils.aperture_photometry((ie == 0), aper)
        nmasked.append(p.field('aperture_sum'))
    ap = np.vstack(ap).T
    aperr = np.vstack(aperr).T
    nmasked = np.vstack(nmasked).T

    print 'Aperture fluxes:', ap[:5]
    print 'Aperture flux errors:', aperr[:5]
    print 'Nmasked:', nmasked[:5]
    
    H,W = img.shape
    sky = []
    skysigma = []
    skymed = []
    skynmasked = []
    for xi,yi in zip(x,y):
        ix = int(np.round(xi))
        iy = int(np.round(yi))
        skyR = sky_outer_r
        xlo = max(0, ix-skyR)
        xhi = min(W, ix+skyR+1)
        ylo = max(0, iy-skyR)
        yhi = min(H, iy+skyR+1)
        xx,yy = np.meshgrid(np.arange(xlo,xhi), np.arange(ylo,yhi))
        r2 = (xx - xi)**2 + (yy - yi)**2
        inannulus = ((r2 >= sky_inner_r**2) * (r2 < sky_outer_r**2))
        unmasked = (ie[ylo:yhi, xlo:xhi] > 0)
        
        #sky.append(np.median(img[ylo:yhi, xlo:xhi][inannulus * unmasked]))

        skypix = img[ylo:yhi, xlo:xhi][inannulus * unmasked]
        # this is the default value...
        nsigma = 4.
        goodpix,lo,hi = sigmaclip(skypix, low=nsigma, high=nsigma)
        # sigmaclip returns unclipped pixels, lo,hi, where lo,hi are
        # mean(goodpix) +- nsigma * sigma
        meansky = np.mean(goodpix)
        sky.append(meansky)
        skysigma.append((meansky - lo) / nsigma)
        skymed.append(np.median(skypix))
        skynmasked.append(np.sum(inannulus * np.logical_not(unmasked)))
    sky = np.array(sky)
    skysigma = np.array(skysigma)
    skymed = np.array(skymed)
    skynmasked = np.array(skynmasked)

    print 'sky', sky[:5]
    print 'median sky', skymed[:5]
    print 'sky sigma', skysigma[:5]

    psmag = ps1.median[:,piband]

    ap2 = ap - sky[:,np.newaxis] * (np.pi * apertures**2)[np.newaxis,:]
    
    if ps is not None:
        plt.clf()
        nstars,naps = ap.shape
        for iap in range(naps):
            plt.plot(psmag, ap[:,iap], 'b.')
        #for iap in range(naps):
        #    plt.plot(psmag, ap2[:,iap], 'r.')
        plt.yscale('symlog')
        plt.xlabel('PS1 %s mag' % band)
        plt.ylabel('DECam Aperture Flux')
    
        #plt.plot(psmag, nmasked[:,-1], 'ro')
        plt.plot(np.vstack((psmag,psmag)), np.vstack((np.zeros_like(psmag),nmasked[:,-1])), 'r-', alpha=0.5)
        plt.ylim(0, 1e3)
        ps.savefig()    
    
        plt.clf()
        plt.plot(ap.T / np.max(ap, axis=1), '.')
        plt.ylim(0, 1)
        ps.savefig()
    
        plt.clf()
        dimshow(tim.getImage(), **tim.ima)
        ax = plt.axis()
        plt.plot(x, y, 'o', mec='r', mfc='none', ms=10)
        plt.axis(ax)
        ps.savefig()

    color = ps1_to_decam(ps1.median, band)
    print 'Color terms:', color

    
    T = fits_table()
    T.apflux = ap.astype(np.float32)
    T.apfluxerr = aperr.astype(np.float32)
    T.apnmasked = nmasked.astype(np.int16)

    # Zero out the errors when pixels are masked
    T.apfluxerr[T.apnmasked > 0] = 0.

    #T.apflux2 = ap2.astype(np.float32)
    T.sky = sky.astype(np.float32)
    T.skysigma = skysigma.astype(np.float32)
    T.expnum = np.array([ccd.expnum] * len(T))
    T.ccdname = np.array([ccd.ccdname] * len(T)).astype('S3')
    T.band = np.array([band] * len(T))
    T.ps1_objid = ps1.obj_id
    T.ps1_mag = psmag + color
    T.ra  = ps1.ra
    T.dec = ps1.dec
    T.tai = np.array([tim.time.toMjd()] * len(T)).astype(np.float32)
    T.airmass = np.array([tim.primhdr['AIRMASS']] * len(T)).astype(np.float32)
    T.x = (x + tim.x0).astype(np.float32)
    T.y = (y + tim.y0).astype(np.float32)

    if False:
        plt.clf()
        plt.plot(skymed, sky, 'b.')
        plt.xlabel('sky median')
        plt.ylabel('sigma-clipped sky')
        ax = plt.axis()
        lo,hi = min(ax),max(ax)
        plt.plot([lo,hi],[lo,hi],'k-', alpha=0.25)
        plt.axis(ax)
        ps.savefig()
    
    return T, tim.primhdr
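The per-star sky estimate above reduces to the following standalone snippet; sigmaclip's lower return value is mean(goodpix) - nsigma*sigma, which is how skysigma is recovered:

import numpy as np
from scipy.stats import sigmaclip

skypix = np.random.normal(10., 2., 5000)   # stand-in for the annulus pixels
nsigma = 4.
goodpix, lo, hi = sigmaclip(skypix, low=nsigma, high=nsigma)
meansky = np.mean(goodpix)
skysigma = (meansky - lo) / nsigma         # recovers the clipped sigma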
Example #52
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--infile", required=True, help="Tabular file.")
    parser.add_argument("-o", "--outfile", required=True, help="Path to the output file.")
    parser.add_argument("--sample_one_cols", help="Input format, like smi, sdf, inchi")
    parser.add_argument("--sample_two_cols", help="Input format, like smi, sdf, inchi")
    parser.add_argument("--sample_cols", help="Input format, like smi, sdf, inchi,separate arrays using ;")
    parser.add_argument("--test_id", help="statistical test method")
    parser.add_argument(
        "--mwu_use_continuity",
        action="store_true",
        default=False,
        help="Whether a continuity correction (1/2.) should be taken into account.",
    )
    parser.add_argument(
        "--equal_var",
        action="store_true",
        default=False,
        help="If set perform a standard independent 2 sample test that assumes equal population variances. If not set, perform Welch's t-test, which does not assume equal population variance.",
    )
    parser.add_argument(
        "--reta", action="store_true", default=False, help="Whether or not to return the internally computed a values."
    )
    parser.add_argument("--fisher", action="store_true", default=False, help="if true then Fisher definition is used")
    parser.add_argument(
        "--bias",
        action="store_true",
        default=False,
        help="if false,then the calculations are corrected for statistical bias",
    )
    parser.add_argument("--inclusive1", action="store_true", default=False, help="if false,lower_limit will be ignored")
    parser.add_argument(
        "--inclusive2", action="store_true", default=False, help="if false,higher_limit will be ignored"
    )
    parser.add_argument("--inclusive", action="store_true", default=False, help="if false,limit will be ignored")
    parser.add_argument(
        "--printextras",
        action="store_true",
        default=False,
        help="If True, if there are extra points a warning is raised saying how many of those points there are",
    )
    parser.add_argument(
        "--initial_lexsort",
        action="store_true",
        default="False",
        help="Whether to use lexsort or quicksort as the sorting method for the initial sort of the inputs.",
    )
    parser.add_argument("--correction", action="store_true", default=False, help="continuity correction ")
    parser.add_argument(
        "--axis",
        type=int,
        default=0,
        help="Axis can equal None (ravel array first), or an integer (the axis over which to operate on a and b)",
    )
    parser.add_argument(
        "--n",
        type=int,
        default=0,
        help="the number of trials. This is ignored if x gives both the number of successes and failures",
    )
    parser.add_argument("--b", type=int, default=0, help="The number of bins to use for the histogram")
    parser.add_argument("--N", type=int, default=0, help="Score that is compared to the elements in a.")
    parser.add_argument("--ddof", type=int, default=0, help="Degrees of freedom correction")
    parser.add_argument("--score", type=int, default=0, help="Score that is compared to the elements in a.")
    parser.add_argument("--m", type=float, default=0.0, help="limits")
    parser.add_argument("--mf", type=float, default=2.0, help="lower limit")
    parser.add_argument("--nf", type=float, default=99.9, help="higher_limit")
    parser.add_argument(
        "--p",
        type=float,
        default=0.5,
        help="The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5",
    )
    parser.add_argument("--alpha", type=float, default=0.9, help="probability")
    parser.add_argument("--new", type=float, default=0.0, help="Value to put in place of values in a outside of bounds")
    parser.add_argument(
        "--proportiontocut",
        type=float,
        default=0.0,
        help="Proportion (in range 0-1) of total data set to trim of each end.",
    )
    parser.add_argument(
        "--lambda_",
        type=float,
        default=1.0,
        help="lambda_ gives the power in the Cressie-Read power divergence statistic",
    )
    parser.add_argument(
        "--imbda",
        type=float,
        default=0,
        help="If lmbda is not None, do the transformation for that value.If lmbda is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument.",
    )
    parser.add_argument("--base", type=float, default=1.6, help="The logarithmic base to use, defaults to e")
    parser.add_argument("--dtype", help="dtype")
    parser.add_argument("--med", help="med")
    parser.add_argument("--cdf", help="cdf")
    parser.add_argument("--zero_method", help="zero_method options")
    parser.add_argument("--dist", help="dist options")
    parser.add_argument("--ties", help="ties options")
    parser.add_argument("--alternative", help="alternative options")
    parser.add_argument("--mode", help="mode options")
    parser.add_argument("--method", help="method options")
    parser.add_argument("--md", help="md options")
    parser.add_argument("--center", help="center options")
    parser.add_argument("--kind", help="kind options")
    parser.add_argument("--tail", help="tail options")
    parser.add_argument("--interpolation", help="interpolation options")
    parser.add_argument("--statistic", help="statistic options")

    args = parser.parse_args()
    infile = args.infile
    outfile = open(args.outfile, "w+")
    test_id = args.test_id
    nf = args.nf
    mf = args.mf
    imbda = args.imbda
    inclusive1 = args.inclusive1
    inclusive2 = args.inclusive2
    sample0 = 0
    sample1 = 0
    sample2 = 0
    if args.sample_cols is not None:
        sample0 = 1
        barlett_samples = []
        for sample in args.sample_cols.split(";"):
            barlett_samples.append(map(int, sample.split(",")))
    if args.sample_one_cols is not None:
        sample1 = 1
        sample_one_cols = args.sample_one_cols.split(",")
    if args.sample_two_cols is not None:
        sample_two_cols = args.sample_two_cols.split(",")
        sample2 = 1
    for line in open(infile):
        sample_one = []
        sample_two = []
        cols = line.strip().split("\t")
        if sample0 == 1:
            b_samples = columns_to_values(barlett_samples, line)
        if sample1 == 1:
            for index in sample_one_cols:
                sample_one.append(cols[int(index) - 1])
        if sample2 == 1:
            for index in sample_two_cols:
                sample_two.append(cols[int(index) - 1])
        if test_id.strip() == "describe":
            size, min_max, mean, uv, bs, bk = stats.describe(map(float, sample_one))
            cols.append(size)
            cols.append(min_max)
            cols.append(mean)
            cols.append(uv)
            cols.append(bs)
            cols.append(bk)
        elif test_id.strip() == "mode":
            vals, counts = stats.mode(map(float, sample_one))
            cols.append(vals)
            cols.append(counts)
        elif test_id.strip() == "nanmean":
            m = stats.nanmean(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "nanmedian":
            m = stats.nanmedian(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "kurtosistest":
            z_value, p_value = stats.kurtosistest(map(float, sample_one))
            cols.append(z_value)
            cols.append(p_value)
        elif test_id.strip() == "variation":
            ra = stats.variation(map(float, sample_one))
            cols.append(ra)
        elif test_id.strip() == "itemfreq":
            freq = stats.itemfreq(map(float, sample_one))
            for list in freq:
                elements = ",".join(map(str, list))
                cols.append(elements)
        elif test_id.strip() == "nanmedian":
            m = stats.nanmedian(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "variation":
            ra = stats.variation(map(float, sample_one))
            cols.append(ra)
        elif test_id.strip() == "boxcox_llf":
            IIf = stats.boxcox_llf(imbda, map(float, sample_one))
            cols.append(IIf)
        elif test_id.strip() == "tiecorrect":
            fa = stats.tiecorrect(map(float, sample_one))
            cols.append(fa)
        elif test_id.strip() == "rankdata":
            r = stats.rankdata(map(float, sample_one), method=args.md)
            cols.append(r)
        elif test_id.strip() == "nanstd":
            s = stats.nanstd(map(float, sample_one), bias=args.bias)
            cols.append(s)
        elif test_id.strip() == "anderson":
            A2, critical, sig = stats.anderson(map(float, sample_one), dist=args.dist)
            cols.append(A2)
            for list in critical:
                cols.append(list)
            cols.append(",")
            for list in sig:
                cols.append(list)
        elif test_id.strip() == "binom_test":
            p_value = stats.binom_test(map(float, sample_one), n=args.n, p=args.p)
            cols.append(p_value)
        elif test_id.strip() == "gmean":
            gm = stats.gmean(map(float, sample_one), dtype=args.dtype)
            cols.append(gm)
        elif test_id.strip() == "hmean":
            hm = stats.hmean(map(float, sample_one), dtype=args.dtype)
            cols.append(hm)
        elif test_id.strip() == "kurtosis":
            k = stats.kurtosis(map(float, sample_one), axis=args.axis, fisher=args.fisher, bias=args.bias)
            cols.append(k)
        elif test_id.strip() == "moment":
            n_moment = stats.moment(map(float, sample_one), n=args.n)
            cols.append(n_moment)
        elif test_id.strip() == "normaltest":
            k2, p_value = stats.normaltest(map(float, sample_one))
            cols.append(k2)
            cols.append(p_value)
        elif test_id.strip() == "skew":
            skewness = stats.skew(map(float, sample_one), bias=args.bias)
            cols.append(skewness)
        elif test_id.strip() == "skewtest":
            z_value, p_value = stats.skewtest(map(float, sample_one))
            cols.append(z_value)
            cols.append(p_value)
        elif test_id.strip() == "sem":
            s = stats.sem(map(float, sample_one), ddof=args.ddof)
            cols.append(s)
        elif test_id.strip() == "zscore":
            z = stats.zscore(map(float, sample_one), ddof=args.ddof)
            for list in z:
                cols.append(list)
        elif test_id.strip() == "signaltonoise":
            s2n = stats.signaltonoise(map(float, sample_one), ddof=args.ddof)
            cols.append(s2n)
        elif test_id.strip() == "percentileofscore":
            p = stats.percentileofscore(map(float, sample_one), score=args.score, kind=args.kind)
            cols.append(p)
        elif test_id.strip() == "bayes_mvs":
            c_mean, c_var, c_std = stats.bayes_mvs(map(float, sample_one), alpha=args.alpha)
            cols.append(c_mean)
            cols.append(c_var)
            cols.append(c_std)
        elif test_id.strip() == "sigmaclip":
            c, c_low, c_up = stats.sigmaclip(map(float, sample_one), low=args.m, high=args.n)
            cols.append(c)
            cols.append(c_low)
            cols.append(c_up)
        elif test_id.strip() == "kstest":
            d, p_value = stats.kstest(
                map(float, sample_one), cdf=args.cdf, N=args.N, alternative=args.alternative, mode=args.mode
            )
            cols.append(d)
            cols.append(p_value)
        elif test_id.strip() == "chi2_contingency":
            chi2, p, dof, ex = stats.chi2_contingency(
                map(float, sample_one), correction=args.correction, lambda_=args.lambda_
            )
            cols.append(chi2)
            cols.append(p)
            cols.append(dof)
            cols.append(ex)
        elif test_id.strip() == "tmean":
            if nf == 0 and mf == 0:
                mean = stats.tmean(map(float, sample_one))
            else:
                mean = stats.tmean(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(mean)
        elif test_id.strip() == "tmin":
            if mf == 0:
                min = stats.tmin(map(float, sample_one))
            else:
                min = stats.tmin(map(float, sample_one), lowerlimit=mf, inclusive=args.inclusive)
            cols.append(min)
        elif test_id.strip() == "tmax":
            if nf == 0:
                max = stats.tmax(map(float, sample_one))
            else:
                max = stats.tmax(map(float, sample_one), upperlimit=nf, inclusive=args.inclusive)
            cols.append(max)
        elif test_id.strip() == "tvar":
            if nf == 0 and mf == 0:
                var = stats.tvar(map(float, sample_one))
            else:
                var = stats.tvar(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(var)
        elif test_id.strip() == "tstd":
            if nf == 0 and mf == 0:
                std = stats.tstd(map(float, sample_one))
            else:
                std = stats.tstd(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(std)
        elif test_id.strip() == "tsem":
            if nf == 0 and mf == 0:
                s = stats.tsem(map(float, sample_one))
            else:
                s = stats.tsem(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(s)
        elif test_id.strip() == "scoreatpercentile":
            if nf == 0 and mf == 0:
                s = stats.scoreatpercentile(
                    map(float, sample_one), map(float, sample_two), interpolation_method=args.interpolation
                )
            else:
                s = stats.scoreatpercentile(
                    map(float, sample_one), map(float, sample_two), (mf, nf), interpolation_method=args.interpolation
                )
            for list in s:
                cols.append(list)
        elif test_id.strip() == "relfreq":
            if nf == 0 and mf == 0:
                rel, low_range, binsize, ex = stats.relfreq(map(float, sample_one), args.b)
            else:
                rel, low_range, binsize, ex = stats.relfreq(map(float, sample_one), args.b, (mf, nf))
            for list in rel:
                cols.append(list)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "binned_statistic":
            if nf == 0 and mf == 0:
                st, b_edge, b_n = stats.binned_statistic(
                    map(float, sample_one), map(float, sample_two), statistic=args.statistic, bins=args.b
                )
            else:
                st, b_edge, b_n = stats.binned_statistic(
                    map(float, sample_one),
                    map(float, sample_two),
                    statistic=args.statistic,
                    bins=args.b,
                    range=(mf, nf),
                )
            cols.append(st)
            cols.append(b_edge)
            cols.append(b_n)
        elif test_id.strip() == "threshold":
            if nf == 0 and mf == 0:
                o = stats.threshold(map(float, sample_one), newval=args.new)
            else:
                o = stats.threshold(map(float, sample_one), mf, nf, newval=args.new)
            for list in o:
                cols.append(list)
        elif test_id.strip() == "trimboth":
            o = stats.trimboth(map(float, sample_one), proportiontocut=args.proportiontocut)
            for list in o:
                cols.append(list)
        elif test_id.strip() == "trim1":
            t1 = stats.trim1(map(float, sample_one), proportiontocut=args.proportiontocut, tail=args.tail)
            for list in t1:
                cols.append(list)
        elif test_id.strip() == "histogram":
            if nf == 0 and mf == 0:
                hi, low_range, binsize, ex = stats.histogram(map(float, sample_one), args.b)
            else:
                hi, low_range, binsize, ex = stats.histogram(map(float, sample_one), args.b, (mf, nf))
            cols.append(hi)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "cumfreq":
            if nf == 0 and mf == 0:
                cum, low_range, binsize, ex = stats.cumfreq(map(float, sample_one), args.b)
            else:
                cum, low_range, binsize, ex = stats.cumfreq(map(float, sample_one), args.b, (mf, nf))
            cols.append(cum)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "boxcox_normmax":
            if nf == 0 and mf == 0:
                ma = stats.boxcox_normmax(map(float, sample_one))
            else:
                ma = stats.boxcox_normmax(map(float, sample_one), (mf, nf), method=args.method)
            cols.append(ma)
        elif test_id.strip() == "boxcox":
            if imbda == 0:
                box, ma, ci = stats.boxcox(map(float, sample_one), alpha=args.alpha)
                cols.append(box)
                cols.append(ma)
                cols.append(ci)
            else:
                box = stats.boxcox(map(float, sample_one), imbda, alpha=args.alpha)
                cols.append(box)
        elif test_id.strip() == "histogram2":
            h2 = stats.histogram2(map(float, sample_one), map(float, sample_two))
            for list in h2:
                cols.append(list)
        elif test_id.strip() == "ranksums":
            z_statistic, p_value = stats.ranksums(map(float, sample_one), map(float, sample_two))
            cols.append(z_statistic)
            cols.append(p_value)
        elif test_id.strip() == "ttest_1samp":
            t, prob = stats.ttest_1samp(map(float, sample_one), map(float, sample_two))
            for list in t:
                cols.append(list)
            for list in prob:
                cols.append(list)
        elif test_id.strip() == "ansari":
            AB, p_value = stats.ansari(map(float, sample_one), map(float, sample_two))
            cols.append(AB)
            cols.append(p_value)
        elif test_id.strip() == "linregress":
            slope, intercept, r_value, p_value, stderr = stats.linregress(
                map(float, sample_one), map(float, sample_two)
            )
            cols.append(slope)
            cols.append(intercept)
            cols.append(r_value)
            cols.append(p_value)
            cols.append(stderr)
        elif test_id.strip() == "pearsonr":
            cor, p_value = stats.pearsonr(map(float, sample_one), map(float, sample_two))
            cols.append(cor)
            cols.append(p_value)
        elif test_id.strip() == "pointbiserialr":
            r, p_value = stats.pointbiserialr(map(float, sample_one), map(float, sample_two))
            cols.append(r)
            cols.append(p_value)
        elif test_id.strip() == "ks_2samp":
            d, p_value = stats.ks_2samp(map(float, sample_one), map(float, sample_two))
            cols.append(d)
            cols.append(p_value)
        elif test_id.strip() == "mannwhitneyu":
            mw_stats_u, p_value = stats.mannwhitneyu(
                map(float, sample_one), map(float, sample_two), use_continuity=args.mwu_use_continuity
            )
            cols.append(mw_stats_u)
            cols.append(p_value)
        elif test_id.strip() == "zmap":
            z = stats.zmap(map(float, sample_one), map(float, sample_two), ddof=args.ddof)
            for list in z:
                cols.append(list)
        elif test_id.strip() == "ttest_ind":
            mw_stats_u, p_value = stats.ttest_ind(
                map(float, sample_one), map(float, sample_two), equal_var=args.equal_var
            )
            cols.append(mw_stats_u)
            cols.append(p_value)
        elif test_id.strip() == "ttest_rel":
            t, prob = stats.ttest_rel(map(float, sample_one), map(float, sample_two), axis=args.axis)
            cols.append(t)
            cols.append(prob)
        elif test_id.strip() == "mood":
            z, p_value = stats.mood(map(float, sample_one), map(float, sample_two), axis=args.axis)
            cols.append(z)
            cols.append(p_value)
        elif test_id.strip() == "shapiro":
            W, p_value, a = stats.shapiro(map(float, sample_one), map(float, sample_two), args.reta)
            cols.append(W)
            cols.append(p_value)
            for list in a:
                cols.append(list)
        elif test_id.strip() == "kendalltau":
            k, p_value = stats.kendalltau(
                map(float, sample_one), map(float, sample_two), initial_lexsort=args.initial_lexsort
            )
            cols.append(k)
            cols.append(p_value)
        elif test_id.strip() == "entropy":
            s = stats.entropy(map(float, sample_one), map(float, sample_two), base=args.base)
            cols.append(s)
        elif test_id.strip() == "spearmanr":
            if sample2 == 1:
                rho, p_value = stats.spearmanr(map(float, sample_one), map(float, sample_two))
            else:
                rho, p_value = stats.spearmanr(map(float, sample_one))
            cols.append(rho)
            cols.append(p_value)
        elif test_id.strip() == "wilcoxon":
            if sample2 == 1:
                T, p_value = stats.wilcoxon(
                    map(float, sample_one),
                    map(float, sample_two),
                    zero_method=args.zero_method,
                    correction=args.correction,
                )
            else:
                T, p_value = stats.wilcoxon(
                    map(float, sample_one), zero_method=args.zero_method, correction=args.correction
                )
            cols.append(T)
            cols.append(p_value)
        elif test_id.strip() == "chisquare":
            if sample2 == 1:
                rho, p_value = stats.chisquare(map(float, sample_one), map(float, sample_two), ddof=args.ddof)
            else:
                rho, p_value = stats.chisquare(map(float, sample_one), ddof=args.ddof)
            cols.append(rho)
            cols.append(p_value)
        elif test_id.strip() == "power_divergence":
            if sample2 == 1:
                stat, p_value = stats.power_divergence(
                    map(float, sample_one), map(float, sample_two), ddof=args.ddof, lambda_=args.lambda_
                )
            else:
                stat, p_value = stats.power_divergence(map(float, sample_one), ddof=args.ddof, lambda_=args.lambda_)
            cols.append(stat)
            cols.append(p_value)
        elif test_id.strip() == "theilslopes":
            if sample2 == 1:
                mpe, met, lo, up = stats.theilslopes(map(float, sample_one), map(float, sample_two), alpha=args.alpha)
            else:
                mpe, met, lo, up = stats.theilslopes(map(float, sample_one), alpha=args.alpha)
            cols.append(mpe)
            cols.append(met)
            cols.append(lo)
            cols.append(up)
        elif test_id.strip() == "combine_pvalues":
            if sample2 == 1:
                stat, p_value = stats.combine_pvalues(
                    map(float, sample_one), method=args.med, weights=map(float, sample_two)
                )
            else:
                stat, p_value = stats.combine_pvalues(map(float, sample_one), method=args.med)
            cols.append(stat)
            cols.append(p_value)
        elif test_id.strip() == "obrientransform":
            ob = stats.obrientransform(*b_samples)
            for list in ob:
                elements = ",".join(map(str, list))
                cols.append(elements)
        elif test_id.strip() == "f_oneway":
            f_value, p_value = stats.f_oneway(*b_samples)
            cols.append(f_value)
            cols.append(p_value)
        elif test_id.strip() == "kruskal":
            h, p_value = stats.kruskal(*b_samples)
            cols.append(h)
            cols.append(p_value)
        elif test_id.strip() == "friedmanchisquare":
            fr, p_value = stats.friedmanchisquare(*b_samples)
            cols.append(fr)
            cols.append(p_value)
        elif test_id.strip() == "fligner":
            xsq, p_value = stats.fligner(center=args.center, proportiontocut=args.proportiontocut, *b_samples)
            cols.append(xsq)
            cols.append(p_value)
        elif test_id.strip() == "bartlett":
            T, p_value = stats.bartlett(*b_samples)
            cols.append(T)
            cols.append(p_value)
        elif test_id.strip() == "levene":
            w, p_value = stats.levene(center=args.center, proportiontocut=args.proportiontocut, *b_samples)
            cols.append(w)
            cols.append(p_value)
        elif test_id.strip() == "median_test":
            stat, p_value, m, table = stats.median_test(
                ties=args.ties, correction=args.correction, lambda_=args.lambda_, *b_samples
            )
            cols.append(stat)
            cols.append(p_value)
            cols.append(m)
            cols.append(table)
            for list in table:
                elements = ",".join(map(str, list))
                cols.append(elements)
        outfile.write("%s\n" % "\t".join(map(str, cols)))
    outfile.close()
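The "sigmaclip" branch of the dispatcher above boils down to the following (the sample values are illustrative; note the tool wires low/high to the --m and --n flags):

from scipy import stats

sample_one = ["1.0", "1.2", "0.9", "1.1", "45.0"]
c, c_low, c_up = stats.sigmaclip([float(v) for v in sample_one], low=4.0, high=4.0)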
Example #53
0
def shift(targ, ref, store=None, lowfilter=20):
    """Shifts the given spectrum by placing it on the same wavelength
    scale as the specified reference spectrum, then solves for shifts
    between the two spectra through cross-correlation.

    Args:
        targ (Spectrum): Target spectrum
        ref (Spectrum): Reference spectrum
        store (optional [file or dict]): h5 file or dict to record
            diagnostic data.
        lowfilter (int): Filter parameter passed through to
            solve_for_shifts.

    Returns:
        shifted (Spectrum): Adjusted and flattened spectrum
    """
    s = np.copy(targ.s)
    serr = np.copy(targ.serr)
    w = np.copy(targ.w)
    mask = np.copy(targ.mask)
    if s.ndim == 1:
        s = np.array([s])
        serr = np.array([serr])
        w = np.array([w])
        mask = np.array([mask])

    ref = _extend_ref(ref, w[0, 0], w[-1, -1])

    # normalize each order of the target spectrum by dividing by the
    # 95th percentile
    percen_order = np.nanpercentile(s, 95, axis=1)
    s /= percen_order.reshape(-1, 1)

    # create empty 2d arrays to store each order
    s_shifted = np.asarray([[]])
    serr_shifted = np.asarray([[]])
    mask_shifted = np.asarray([[]])
    ws = np.asarray([[]])

    # create lists to store diagnostic data
    lag_data = []
    center_pix_data = []
    fit_data = []

    # length of each section in pixels
    section_length = 500

    if store is not None:
        store['num_orders'] = s.shape[0]

    # Containers for temporary holding of data
    s_rescaled = []
    serr_rescaled = []
    m_rescaled = []
    start_idxs = []

    # Fixed number of sections across every order
    num_sections = int(s.shape[1] / section_length) + 1

    # shift each order
    for i in range(s.shape[0]):
        ww = w[i]
        ss = s[i]
        sserr = serr[i]
        mm = mask[i]

        # clip ends off each order
        cliplen = 15
        ww = ww[cliplen:-cliplen]
        ss = ss[cliplen:-cliplen]
        sserr = sserr[cliplen:-cliplen]
        mm = mm[cliplen:-cliplen]

        # clip obvious noise
        clip = ss < 1.2
        ss = ss[clip]
        sserr = sserr[clip]
        mm = mm[clip]
        ww = ww[clip]

        # get the reference spectrum in the same range as the target range
        w_min = ww[0]
        w_max = ww[-1]

        in_range = (ref.w > w_min) & (ref.w < w_max)
        start_idxs.append(np.argmax(in_range))
        w_ref_c = ref.w[in_range]
        s_ref_c = ref.s[in_range]
        m_ref_c = ref.mask[in_range]

        # place the target spectrum on the same wavelength scale
        ss, sserr, mm = rescale_w(ss, sserr, ww, mm, w_ref_c)

        s_rescaled.append(ss)
        serr_rescaled.append(sserr)
        m_rescaled.append(mm)

        # true section length
        l_sect = int(len(ss) / num_sections)

        lags = np.empty(num_sections)
        center_pix = np.empty(num_sections)

        if store is not None:
            key = "order_{0:d}/num_sections".format(i)
            store[key] = num_sections

        for j in range(num_sections):
            # Get indices for section
            idx_min = j * l_sect
            idx_max = (j+1) * l_sect
            center_pix[j] = (j + 0.5) * l_sect  # midpoint of the section

            ss_sect = ss[idx_min:idx_max]
            mm_sect = mm[idx_min:idx_max]
            s_ref_sect = s_ref_c[idx_min:idx_max]
            m_ref_sect = m_ref_c[idx_min:idx_max]

            # Skip sections where too many pixels are masked out
            if len(ss_sect[mm_sect]) < (l_sect / 2) or \
                    len(s_ref_sect[m_ref_sect]) < (l_sect / 2):
                lag = np.nan
                lag_arr = []
                xcorr = []
            else:
                # get the shifts in pixel number
                lag, lag_arr, xcorr = solve_for_shifts(ss_sect, mm_sect,
                                                       s_ref_sect, m_ref_sect,
                                                       lowfilter=lowfilter)
            # Save results
            lags[j] = lag
            if store is not None:
                key = "order_{0:d}/sect_{1:d}/".format(i, j)
                store[key+"xcorr"] = xcorr
                store[key+"lag_arr"] = lag_arr

        # Save lag data
        lag_data.append(lags)
        center_pix_data.append(center_pix)

    lag_data = np.asarray(lag_data)
    # Compute sigma-clipped mean lags for each segment, if there are multiple
    # orders
    if s.shape[0] > 1:
        clip = 2
        for j in range(lag_data.shape[1]):
            lag_order = lag_data[:, j]
            lag_order = lag_order[~np.isnan(lag_order)]
            clipped, crit_low, crit_high = sigmaclip(lag_order, low=clip,
                                                     high=clip)

            mean_lag = np.nanmean(clipped)

            # Replace values outside the critical range and nans with mean_lag
            for i in range(lag_data.shape[0]):
                curr = lag_data[i, j]
                lag_data[i, j] = curr if crit_low < curr < crit_high \
                    else mean_lag
    else:
        for j in range(lag_data.shape[1]):
            curr = lag_data[0, j]
            if j == 0:
                lag_data[0, j] = lag_data[0, j + 1] if np.isnan(curr) else curr
            elif j == (lag_data.shape[1] - 1):
                lag_data[0, j] = lag_data[0, j - 1] if np.isnan(curr) else curr
            else:
                lag_data[0, j] = np.nanmean([lag_data[0, j - 1],
                    lag_data[0, j + 1]]) if np.isnan(curr) else curr

    for i in range(s.shape[0]):
        # Restore data from previous loop
        ss = s_rescaled[i]
        sserr = serr_rescaled[i]
        mm = m_rescaled[i]
        start_idx = start_idxs[i]

        lags = lag_data[i]
        center_pix = center_pix_data[i]

        # use robust least squares to fit a line to the shifts
        # (Cauchy loss function)
        p_guess = np.array([0, 0])
        fit_res = least_squares(_linear_fit_residuals, p_guess,
                                args=(center_pix, lags), loss='cauchy')
        fit = fit_res.x
        pix_arr = np.arange(0, len(ss))
        pix_shifted = pix_arr - fit[1] - pix_arr*fit[0]

        # don't read past the wavelength array
        pix_min = max(int(pix_shifted[0]), 0)
        pix_max = min(int(pix_shifted[-1]), len(ref.w)-start_idx)

        # new pixel array
        new_pix = np.arange(pix_min, pix_max)
        # new wavelength array
        w_ref_c = ref.w[start_idx+pix_min:start_idx+pix_max]

        # interpolate the spectrum back onto the reference spectrum
        ss_shifted = np.interp(new_pix, pix_shifted, ss)
        sserr_shifted = np.interp(new_pix, pix_shifted, sserr)
        mm_shifted = np.interp(new_pix, pix_shifted, mm)

        # append to array
        s_shifted = np.append(s_shifted, ss_shifted)
        serr_shifted = np.append(serr_shifted, sserr_shifted)
        mask_shifted = np.append(mask_shifted, mm_shifted)
        ws = np.append(ws, w_ref_c)

        # save diagnostic data
        fitted = fit[0] * center_pix + fit[1]
        fit_data.append(np.array(fitted))

    # save diagnostic data
    if store is not None:
        # convert jagged array to rectangular one
        lengths = []
        for l in lag_data:
            lengths.append(len(l))
        ml = max(lengths)

        lag_data = [utils.extend_array(l, ml) for l in lag_data]
        center_pix_data = [utils.extend_array(l, ml) for l in center_pix_data]
        fit_data = [utils.extend_array(l, ml) for l in fit_data]

        store['lag'] = np.asarray(lag_data)
        store['center_pix'] = np.asarray(center_pix_data)
        store['fit'] = np.asarray(fit_data)

    # flatten spectrum
    w_min = ws[0]
    w_max = ws[-1]
    in_range = (ref.w > w_min) & (ref.w < w_max)
    w_ref_trunc = ref.w[in_range]

    w_flat, s_flat, serr_flat, mask_flat = \
        flatten(ws, s_shifted, serr_shifted, mask_shifted, w_ref=w_ref_trunc)

    return spectrum.Spectrum(w_flat, s_flat, serr_flat, name=targ.name,
                             mask=mask_flat, header=targ.header,
                             attrs=targ.attrs)
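The lag-cleanup loop above (sigma-clip the lags across orders, then replace outliers and NaNs with the clipped mean) is easier to see in isolation; here is a minimal sketch with made-up lag values:

import numpy as np
from scipy.stats import sigmaclip

# rows = orders, columns = sections; values are measured pixel lags
lag_data = np.array([[1.00, 1.10, 0.90],
                     [1.05, 9.00, 1.00],     # 9.00: a bad cross-correlation
                     [0.95, 1.05, np.nan],   # NaN: a skipped section
                     [1.10, 0.95, 1.05],
                     [0.90, 1.00, 0.95]])

for j in range(lag_data.shape[1]):
    col = lag_data[:, j]
    col = col[~np.isnan(col)]
    clipped, crit_low, crit_high = sigmaclip(col, low=2, high=2)
    mean_lag = np.nanmean(clipped)
    for i in range(lag_data.shape[0]):
        curr = lag_data[i, j]
        if not (crit_low < curr < crit_high):  # NaN also fails this test
            lag_data[i, j] = mean_lag

print(lag_data)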
Example #54
0
        flx= spec[0].data[1]
        #spec.close()
        #   flxx= np.nan_to_num(flx) #making sure the flux array has no NAN or INF
        wlen= spec[0].data[0]
        norm_flx= flx/np.median(flx[2360:2390]) # normalize the spectrum
        clust_spec= np.vstack((clust_spec, norm_flx)) # 2D array: row 0 is the restframe wavelength; each later row holds the corrected flux of one cluster spectrum
        del spec
    
    print("cluster", c+1, "has", len(clust_spec[1:]), "objects")
    
    spec_num.append(len(clust_spec[1:]))
    
    clipped_compo=[]
    for i in range(clust_spec.shape[1]):
        
        y = sigmaclip(clust_spec[1:, i], 3, 3)
        m = np.median(y[0])
        clipped_compo.append(m)
    
    compos.append(clipped_compo) # list with the composites (compos[0] is composite from 1st cluster, compos[1] 2nd cluster,...)


#save the composites as fits files

for i, j in zip(range(1, k+1), spec_num):
    spec_name = "./composites/"+clstr_name+"_"+str(k)+"clstrs"+str(i)+".fits" # assumes a directory called composites exists in the working directory
    spec_file = np.vstack((wlen, compos[i-1]))
    hdu = fits.PrimaryHDU(spec_file)
    hdr = hdu.header
    hdr.set('SPEC_NUMBER', j)
    hdr.set('COMPOSITE', clstr_name)
    hdu.writeto(spec_name)  # write the composite spectrum to disk
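The clipped-median stack in the loop above generalizes to any set of aligned spectra; a self-contained sketch with synthetic data standing in for clust_spec[1:]:

import numpy as np
from scipy.stats import sigmaclip

rng = np.random.default_rng(0)
spectra = rng.normal(1.0, 0.05, size=(20, 100))  # (n_spectra, n_pixels)
spectra[3, 40] = 5.0  # inject a spurious spike into one spectrum

# at each wavelength pixel, sigma-clip across spectra, then take the median
composite = np.empty(spectra.shape[1])
for i in range(spectra.shape[1]):
    kept, lo, hi = sigmaclip(spectra[:, i], 3, 3)
    composite[i] = np.median(kept)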
Example #55
0
def getClippedMeanandStddev(data, nsig=3):
    from scipy.stats import sigmaclip
    import numpy as np
    clipped_array = sigmaclip(data, low=nsig, high=nsig)[0]
    return np.mean(clipped_array), np.std(clipped_array)
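A quick usage sketch: one strong outlier drags the plain mean and standard deviation, while the clipped versions stay near the underlying distribution (values here are made up).

import numpy as np

data = np.append(np.random.normal(10.0, 1.0, 1000), 100.0)
print(np.mean(data), np.std(data))            # pulled upward by the outlier
print(getClippedMeanandStddev(data, nsig=3))  # stays close to (10, 1)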
Example #56
0
def getSEPSky(imgArr, mskArr, imgHead, skyClip=3, zp=27.0, pix=0.168,
              rebin=4, prefix='sep_sky', suffix='imgsub',
              verbose=True, visual=True, bkgSize=40, bkgFilter=5,
              saveBkg=False, nClip=2):
    """
    Estimating the background using SEP.

    Parameters:
    """
    if verbose:
        print(SEP)
        print("### ESTIMATING THE GLOBAL BACKGROUND AND "
              "SURFACE BRIGHTNESS LIMIT")
    dimX, dimY = imgArr.shape
    mskX, mskY = mskArr.shape
    if (dimX != mskX) or (dimY != mskY):
        raise Exception("## The image and mask don't have the same size!")

    # What if there is no useful masked pixel
    imgMasked = copy.deepcopy(imgArr)
    imgMasked[mskArr > 0] = np.nan
    try:
        sepBkg = sep.Background(imgMasked, bw=bkgSize, bh=bkgSize,
                                fw=bkgFilter, fh=bkgFilter)
    except ValueError:
        imgTemp = copy.deepcopy(imgMasked)
        imgTemp = imgTemp.byteswap(True).newbyteorder()
        sepBkg = sep.Background(imgTemp, bw=bkgSize, bh=bkgSize,
                                fw=bkgFilter, fh=bkgFilter)

    avgBkg = sepBkg.globalback
    rmsBkg = sepBkg.globalrms
    if (not np.isfinite(avgBkg)) or (not np.isfinite(rmsBkg)):
        print(WAR)
        warnings.warn("## The average or rms of the SEP background is not finite")
    if verbose:
        print(SEP)
        print("### SEP BKG AVG, RMS : %10.7f, %10.7f" % (avgBkg, rmsBkg))

    # Subtract the sky model from the image
    try:
        imgBkg = sepBkg.back()
        imgSub = (imgArr - imgBkg)
        fitsSub = prefix + '_' + suffix + '.fits'
        # Save the new image
        imgSave = copy.deepcopy(imgSub)
        imgSave = imgSave.byteswap(True).newbyteorder()
        hdu = fits.PrimaryHDU(imgSave)
        hdu.header = imgHead
        hdulist = fits.HDUList([hdu])
        hdulist.writeto(fitsSub, overwrite=True)

        if saveBkg:
            fitsBkg = prefix + '_' + suffix + '_bkg.fits'
            bkgSave = copy.deepcopy(imgBkg)
            bkgSave = bkgSave.byteswap(True).newbyteorder()
            hdu = fits.PrimaryHDU(bkgSave)
            hdu.header = imgHead
            hdulist = fits.HDUList([hdu])
            hdulist.writeto(fitsBkg, overwrite=True)

    except Exception:
        print(WAR)
        warnings.warn("## Something went wrong with the SEP background subtraction")

    # Rebin image
    dimBinX = int((dimX - 1) / rebin)
    dimBinY = int((dimY - 1) / rebin)
    try:
        imgBin = hUtil.congrid(imgArr, (dimBinX, dimBinY), method='nearest')
        subBin = hUtil.congrid(imgSub, (dimBinX, dimBinY), method='nearest')
        mskBin = hUtil.congrid(mskArr, (dimBinX, dimBinY), method='neighbour')
    except Exception:
        print(WAR)
        warnings.warn("congrid failed!")
        imgBin = imgArr
        subBin = imgSub
        mskBin = mskArr

    pixSky1 = imgBin[mskBin == 0].flatten()
    pixSky1 = pixSky1[np.isfinite(pixSky1)]
    try:
        pixSky1, low1, upp1 = sigmaclip(pixSky1, low=skyClip, high=skyClip)
        print("### %d pixels left for the sky of the original image" % len(pixSky1))
        print("###      Boundary: %8.5f -- %8.5f" % (low1, upp1))
    except Exception:
        print(WAR)
        warnings.warn("Sigma clip failed for imgBin")

    pixSky2 = subBin[mskBin == 0].flatten()
    pixSky2 = pixSky2[np.isfinite(pixSky2)]
    try:
        pixSky2, low2, upp2 = sigmaclip(pixSky2, low=skyClip, high=skyClip)
        print("### %d sky pixels left on the background-subtracted image" % len(pixSky2))
        print("###      Boundary: %8.5f -- %8.5f" % (low2, upp2))
    except Exception:
        print(WAR)
        warnings.warn("Sigma clip failed for subBin")

    if visual:
        sepPNG = prefix + '_' + suffix + '_skyhist.png'
        showSkyHist(pixSky2, skypix2=pixSky1, pngName=sepPNG)

    return imgSub
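For context, the SEP background model this function relies on can be exercised on its own; a minimal sketch on a synthetic image, with mesh parameters matching the defaults above (bw/bh, fw/fh follow sep.Background's documented signature):

import numpy as np
import sep

img = np.random.normal(100.0, 5.0, size=(512, 512)).astype(np.float32)
bkg = sep.Background(img, bw=40, bh=40, fw=5, fh=5)

print(bkg.globalback, bkg.globalrms)  # global background level and rms
img_sub = img - bkg.back()            # subtract the per-pixel background model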
Example #57
0
def getSEPSky(imgArr,
              mskArr,
              imgHead,
              skyClip=3,
              zp=27.0,
              pix=0.168,
              rebin=4,
              prefix='sep_sky',
              suffix='imgsub',
              verbose=True,
              visual=True,
              bkgSize=40,
              bkgFilter=5,
              saveBkg=False,
              nClip=2):
    """
    Estimating the background using SEP.

    Parameters:
    """
    dimX, dimY = imgArr.shape
    mskX, mskY = mskArr.shape
    if (dimX != mskX) or (dimY != mskY):
        raise Exception("## The image and mask don't have the same size!")

    # What if there is no useful masked pixel
    try:
        sepBkg = sep.Background(imgArr, mask=mskArr, bw=bkgSize, bh=bkgSize,
                                fw=bkgFilter, fh=bkgFilter)
    except ValueError:
        imgArr = imgArr.byteswap(True).newbyteorder()
        sepBkg = sep.Background(imgArr, mask=mskArr, bw=bkgSize, bh=bkgSize,
                                fw=bkgFilter, fh=bkgFilter)

    avgBkg = sepBkg.globalback
    rmsBkg = sepBkg.globalrms
    if (not np.isfinite(avgBkg)) or (not np.isfinite(rmsBkg)):
        warnings.warn("###    The SEP background has problem")
    if verbose:
        print("###    SEP BKG AVG, RMS : %10.7f, %10.7f" % (avgBkg, rmsBkg))

    # Subtract the sky model from the image
    try:
        imgBkg = sepBkg.back()
        imgSub = (imgArr - imgBkg)
        fitsSub = prefix + '_' + suffix + '.fits'
        # Save the new image
        imgSave = copy.deepcopy(imgSub)
        imgSave = imgSave.byteswap(True).newbyteorder()
        hdu = fits.PrimaryHDU(imgSave)
        hdu.header = imgHead
        hdulist = fits.HDUList([hdu])
        hdulist.writeto(fitsSub, overwrite=True)

        if saveBkg:
            fitsBkg = prefix + '_' + suffix + '_bkg.fits'
            bkgSave = copy.deepcopy(imgBkg)
            bkgSave = bkgSave.byteswap(True).newbyteorder()
            hdu = fits.PrimaryHDU(bkgSave)
            hdu.header = imgHead
            hdulist = fits.HDUList([hdu])
            hdulist.writeto(fitsBkg, overwrite=True)
    except Exception:
        warnings.warn("## Something wrong with the SEP background subtraction")

    # Rebin image
    dimBinX = int((dimX - 1) / rebin)
    dimBinY = int((dimY - 1) / rebin)
    try:
        imgBin = hUtil.congrid(imgArr, (dimBinX, dimBinY), method='nearest')
        subBin = hUtil.congrid(imgSub, (dimBinX, dimBinY), method='nearest')
        mskBin = hUtil.congrid(mskArr, (dimBinX, dimBinY), method='neighbour')
    except Exception:
        warnings.warn("congrid failed!")
        print("###    Image rebinning failed for this galaxy!")
        imgBin = imgArr
        subBin = imgSub
        mskBin = mskArr

    pixSky1 = imgBin[mskBin == 0].flatten()
    pixSky1 = pixSky1[np.isfinite(pixSky1)]
    try:
        pixSky1, low1, upp1 = sigmaclip(pixSky1, low=skyClip, high=skyClip)
    except Exception:
        warnings.warn("\nSigma clip fails for imgBin")

    pixSky2 = subBin[mskBin == 0].flatten()
    pixSky2 = pixSky2[np.isfinite(pixSky2)]
    try:
        pixSky2, low2, upp2 = sigmaclip(pixSky2, low=skyClip, high=skyClip)
    except Exception:
        warnings.warn("Sigma clip fails for mskBin")

    if visual:
        sepPNG = prefix + '_' + suffix + '_skyhist.png'
        showSkyHist(pixSky2, skypix2=pixSky1, pngName=sepPNG)

    return imgSub
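Note that both versions accept zp and pix without using them in the bodies shown, presumably feeding a surface-brightness-limit estimate made elsewhere. A hedged sketch of the standard conversion from a clipped sky rms to a 1-sigma limit (an assumption about intent, not code from either function):

import numpy as np

def sky_rms_to_sb_limit(rms, zp=27.0, pix=0.168, nsig=1.0):
    # per-pixel rms -> counts per arcsec^2, then onto the magnitude scale;
    # `rms` could be, e.g., the std of the clipped pixSky2 array above
    return -2.5 * np.log10(nsig * rms / pix ** 2) + zp

print(sky_rms_to_sb_limit(0.005))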