Example #1
    def _sigclip_data(self, data_ma):
        """
        Perform sigma clipping on the data in regions of size
        ``box_shape``.
        """

        ny, nx = data_ma.shape
        ny_box, nx_box = self.box_shape
        y_nbins = int(ny / ny_box)   # always integer because data were padded
        x_nbins = int(nx / nx_box)   # always integer because data were padded
        data_rebin = np.ma.swapaxes(data_ma.reshape(
            y_nbins, ny_box, x_nbins, nx_box), 1, 2).reshape(y_nbins, x_nbins,
                                                             ny_box * nx_box)
        del data_ma
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            if ASTROPY_LT_1P1:
                self.data_sigclip = sigma_clip(
                    data_rebin, sig=self.sigclip_sigma, axis=2,
                    iters=self.sigclip_iters, cenfunc=np.ma.median,
                    varfunc=np.ma.var)
            else:
                self.data_sigclip = sigma_clip(
                    data_rebin, sigma=self.sigclip_sigma, axis=2,
                    iters=self.sigclip_iters, cenfunc=np.ma.median,
                    stdfunc=np.std)
        del data_rebin
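# A minimal, self-contained sketch (not from the original source) of the same
# reshape trick: split a 2D array into boxes and sigma-clip each box along the
# flattened-box axis. Assumes a modern astropy, where ``iters`` was renamed
# ``maxiters``.
import numpy as np
from astropy.stats import sigma_clip

data = np.random.normal(size=(100, 100))
ny_box, nx_box = 10, 10
y_nbins, x_nbins = data.shape[0] // ny_box, data.shape[1] // nx_box
boxes = np.swapaxes(
    data.reshape(y_nbins, ny_box, x_nbins, nx_box), 1, 2
).reshape(y_nbins, x_nbins, ny_box * nx_box)
clipped = sigma_clip(boxes, sigma=3.0, maxiters=5, axis=2)  # clip each box
background = np.ma.median(clipped, axis=2)                  # one value per box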
Example #2
    def _calculate_collapse(self, data_name, operation, spatial_region, sigma_selection, sigma_parameter, start_index, end_index):

        start_wavelength = self.wavelengths[start_index]
        end_wavelength = self.wavelengths[end_index]

        label = '{}-collapse-{} ({:.4e}, {:.4e})'.format(data_name, operation,
                                                         start_wavelength,
                                                         end_wavelength)

        # Setup the input_data (and apply the spatial mask based on
        # the selection in the spatial_region_combobox)
        input_data = self.data[data_name]
        log.debug('    spatial region is {}'.format(spatial_region))
        if spatial_region != 'Image':
            subset = [x.to_mask() for x in self.data.subsets if x.label == spatial_region][0]
            input_data = input_data * subset

        # Apply sigma clipping
        if 'Simple' in sigma_selection:
            sigma = sigma_parameter

            if sigma is None:
                return

            input_data = sigma_clip(input_data, sigma=sigma, axis=0)
            label += ' sigma={}'.format(sigma)

        elif 'Advanced' in sigma_selection:
            sigma, sigma_lower, sigma_upper, sigma_iters = sigma_parameter
            log.debug('    returned from calculate_callback_advanced_sigma_check with sigma {}  sigma_lower {}  sigma_upper {}  sigma_iters {}'.format(
                sigma, sigma_lower, sigma_upper, sigma_iters))

            if sigma is None:
                return

            input_data = sigma_clip(input_data, sigma=sigma, sigma_lower=sigma_lower,
                                    sigma_upper=sigma_upper, iters=sigma_iters, axis=0)

            # Add to label so it is clear which overlay/component is which
            if sigma:
                label += ' sigma={}'.format(sigma)

            if sigma_lower:
                label += ' sigma_lower={}'.format(sigma_lower)

            if sigma_upper:
                label += ' sigma_upper={}'.format(sigma_upper)

            if sigma_iters:
                label += ' sigma_iters={}'.format(sigma_iters)
        else:
            pass  # no sigma clipping requested

        # Do calculation if we got this far
        new_wavelengths, new_component = collapse_cube(input_data, data_name, self.data.coords.wcs,
                                             operation, start_index, end_index)

        new_component_unit = self.data.get_component(data_name).units

        return new_wavelengths, new_component, new_component_unit, label
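# An illustrative, hypothetical sketch (synthetic cube, not the original
# classes) of the clipping step above: sigma-clip a spectral cube along the
# wavelength axis, then collapse the clipped cube with a mask-aware
# reduction, roughly what a mask-aware ``collapse_cube`` would do.
import numpy as np
from astropy.stats import sigma_clip

cube = np.random.normal(loc=1.0, size=(50, 20, 20))  # (wavelength, y, x)
cube[10, 5, 5] = 100.0                                # inject an outlier
clipped = sigma_clip(cube, sigma=3.0, axis=0)         # clip along wavelength
collapsed = np.ma.mean(clipped, axis=0)               # outlier excluded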
Example #3
def calc_stat(data, sigma=1.8, niter=10, algorithm='median'):
    """Calculate statistics for given data.

    Parameters
    ----------
    data : ndarray
        Data to be calculated from.

    sigma : float
        Sigma for sigma clipping.

    niter : int
        Number of iterations for sigma clipping.

    algorithm : {'mean', 'median', 'mode', 'stddev'}
        Algorithm for statistics calculation.

    Returns
    -------
    val : float
        Statistics value.

    Raises
    ------
    ValueError
        Invalid algorithm.

    """
    arr = np.ravel(data)

    if len(arr) < 1:
        return 0.0

    if ((astropy_version.major == 1 and astropy_version.minor == 0) or
            (astropy_version.major < 1)):
        arr_masked = sigma_clip(arr, sig=sigma, iters=niter)
    else:
        arr_masked = sigma_clip(arr, sigma=sigma, iters=niter)

    arr = arr_masked.data[~arr_masked.mask]

    if len(arr) < 1:
        return 0.0

    algorithm = algorithm.lower()
    if algorithm == 'mean':
        val = arr.mean()
    elif algorithm == 'median':
        val = np.median(arr)
    elif algorithm == 'mode':
        val = biweight_location(arr)
    elif algorithm == 'stddev':
        val = arr.std()
    else:
        raise ValueError('{0} is not a valid algorithm for sky background '
                         'calculations'.format(algorithm))

    return val
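# Illustrative usage of ``calc_stat`` (synthetic values, not from the source;
# assumes an astropy version that still accepts the ``iters`` keyword):
import numpy as np

sky = np.random.normal(loc=200.0, scale=5.0, size=1000)
sky[::100] = 10000.0                                   # cosmic-ray-like hits
background = calc_stat(sky, sigma=1.8, niter=10, algorithm='median')
noise = calc_stat(sky, sigma=1.8, niter=10, algorithm='stddev')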
Example #4
    def plotit(x, y, which, nbins=15, **kwargs):
        x = x.clip(0, 10)
        y = y.clip(0, 10)
        x = sigma_clip(x, iters=2, sigma=5)
        y = sigma_clip(y, iters=2, sigma=5)
        ii = np.where(~x.mask & ~y.mask)
        x = x[ii].filled()
        y = y[ii].filled()
#        xr,yr = {'optwise':[(0.0,1.3),(-0.02,0.08)]}[which]
        n, xx, yy = np.histogram2d(x, y, nbins)  # ,[xr,yr])
        plt.contour(n.T, extent=[xx[0], xx[-1], yy[0], yy[-1]], **kwargs)
Example #5
    def _sigclip_data(self):
        """
        Perform sigma clipping on the (masked) data in regions of size
        ``box_size``.
        """

        data3d = np.ma.swapaxes(self.data_ma.reshape(
            self.nyboxes, self.box_size[0], self.nxboxes, self.box_size[1]),
            1, 2).reshape(self.nyboxes, self.nxboxes, self.box_npts)
        del self.data_ma

        # the number of masked pixels in each mesh including *only* the
        # input (and padding) mask
        self._nmasked_mesh_orig = np.ma.count_masked(data3d, axis=2)

        self.mesh_yidx, self.mesh_xidx = self._define_mesh2d_indices(
            self._nmasked_mesh_orig)
        data2d = data3d[self.mesh_yidx, self.mesh_xidx, :]

        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            if ASTROPY_LT_1P1:
                self.data_sigclip = sigma_clip(
                    data2d, sig=self.sigclip_sigma, axis=1,
                    iters=self.sigclip_iters, cenfunc=np.ma.median,
                    varfunc=np.ma.var)
            else:
                self.data_sigclip = sigma_clip(
                    data2d, sigma=self.sigclip_sigma, axis=1,
                    iters=self.sigclip_iters, cenfunc=np.ma.median,
                    stdfunc=np.std)

        # the number of masked and unmasked pixels in each mesh
        # including *both* the input (and padding) mask and pixels masked
        # via sigma clipping
        nmasked = np.ma.count_masked(self.data_sigclip, axis=1)

        if self.remove_masked == 'threshold':
            idx1d = np.where((self.box_npts - nmasked) >=
                             self.meshpix_threshold)
            self.data_sigclip = self.data_sigclip[idx1d]
            nmasked = nmasked[idx1d]
            self.mesh_yidx = self.mesh_yidx[idx1d]
            self.mesh_xidx = self.mesh_xidx[idx1d]
            if len(self.mesh_yidx) == 0:
                raise ValueError('There are no valid meshes available.')

        self.nmasked_mesh = self._convert_1d_to_2d_mesh(nmasked)
        self.nunmasked_mesh = self.box_npts - self.nmasked_mesh
        return
Example #6
    def sigma_clip(self, data):
        if not self.sigclip:
            return data

        if ASTROPY_LT_1P1:
            warnings.warn('sigma_lower and sigma_upper will be ignored '
                          'because they are not supported astropy < 1.1',
                          AstropyUserWarning)
            return sigma_clip(data, sig=self.sigma,
                              iters=self.iters)
        else:
            return sigma_clip(data, sigma=self.sigma,
                              sigma_lower=self.sigma_lower,
                              sigma_upper=self.sigma_upper,
                              iters=self.iters)
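# A standalone sketch of the asymmetric clipping this wrapper exposes
# (illustrative values; modern astropy keywords, ``maxiters`` not ``iters``).
import numpy as np
from astropy.stats import sigma_clip as astropy_sigma_clip

flux = np.random.normal(1.0, 0.01, 500)
flux[::50] = 0.5                                   # deep dips, e.g. transits
clipped = astropy_sigma_clip(flux, sigma_lower=10.0, sigma_upper=3.0,
                             maxiters=5)           # clip high, keep the dips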
Example #7
def plot_image(ax,data,size,alpha=0.5,cmap='cubehelix',extent=None):

	sigclip = sigma_clip(data,6)
	sigclip = sigclip.filled(np.nanmax(sigclip)) # fill clipped values with max

	'''
	### shift all numbers so that minimum = 0
	try:
		minimum = sigclip[size[0]:size[1],size[2]:size[3]].min()
	except ValueError:
		print 1/0
	if sigclip.min() < 0:
		sigclip += np.abs(minimum)
	else:
		sigclip -= np.abs(minimum)
	'''

	'''
	### put into units of standard deviations
	### start linear scale at 3sigma
	std = np.std(sigclip[size[2]:size[3],size[0]:size[1]])
	sigclip /= std
	sigclip = np.clip(sigclip,1.0,np.inf)
	
	minimum = np.nanmin(sigclip)
	sigclip = sigclip - minimum
	'''
	### show image
	ax.imshow(sigclip, origin='lower', cmap=cmap,alpha=alpha,extent=extent)
Example #8
    def remove_aperture_outliers(self, aperture_centers, aperture_means, aperture_stddevs):

        """
        This function ...
        :return:
        """

        means_distribution = Distribution.from_values(aperture_means, bins=50)
        stddevs_distribution = Distribution.from_values(aperture_stddevs, bins=50)

        #means_distribution.plot("Aperture means before sigma-clipping")
        #stddevs_distribution.plot("Aperture stddevs before sigma-clipping")

        clip_mask = stats.sigma_clip(aperture_stddevs, sigma=3.0, iters=None, copy=False).mask

        clipped_aperture_centers = []
        for i in range(len(clip_mask)):
            if clip_mask[i]: continue
            else: clipped_aperture_centers.append(aperture_centers[i])
        aperture_centers = clipped_aperture_centers
        aperture_means = np.ma.MaskedArray(aperture_means, clip_mask).compressed()
        aperture_stddevs = np.ma.MaskedArray(aperture_stddevs, clip_mask).compressed()

        means_distribution = Distribution.from_values(aperture_means, bins=50)
        stddevs_distribution = Distribution.from_values(aperture_stddevs, bins=50)

        #means_distribution.plot("Aperture means after sigma-clipping")
        #stddevs_distribution.plot("Aperture stddevs after sigma-clipping")

        # Return the sigma-clipped aperture properties
        return aperture_centers, aperture_means, aperture_stddevs
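# A compact, hypothetical alternative to the filtering loop above: apply the
# clip mask to all three aperture arrays at once with boolean indexing
# (synthetic data; modern astropy keyword ``maxiters``; assumes the centers
# can be held in a numpy array).
import numpy as np
from astropy.stats import sigma_clip

stddevs = np.random.chisquare(5, size=200)
means = np.random.normal(size=200)
centers = np.arange(200)                      # stand-in for center objects
keep = ~sigma_clip(stddevs, sigma=3.0, maxiters=None).mask
centers, means, stddevs = centers[keep], means[keep], stddevs[keep]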
Example #9
def sigma_clip_mask(data, sigma_level=3.0, mask=None):

    """
    This function ...
    :param data:
    :param sigma_level:
    :param mask:
    :return:
    """

    # Split the x, y and z values of the data, without the masked values
    x_values, y_values, z_values = general.split_xyz(data, mask=mask)

    # Sigma-clip z-values that are outliers
    masked_z_values = sigma_clip(z_values, sigma=sigma_level, iters=None, copy=False)

    # Copy the mask or create a new one if none was provided
    new_mask = copy.deepcopy(mask) if mask is not None else Mask(np.zeros_like(data))

    for i, masked in enumerate(masked_z_values.mask):

        if masked:

            x = x_values[i]
            y = y_values[i]
            new_mask[y,x] = True

    #if not isinstance(new_mask, Mask): print(new_mask, mask)

    # Assert the mask is of type 'Mask'
    assert isinstance(new_mask, Mask)

    # Return the new or updated mask
    return new_mask
Example #10
def sigma_clip(values, sigma_level=3.0, return_nmasked=False, logarithmic=False):

    """
    This function ...
    :param values:
    :param sigma_level:
    :param return_nmasked:
    :param logarithmic: the values are logarithmically distributed
    :return:
    """

    from astropy.stats import sigma_clip

    # Sigma-clip
    if logarithmic: values = np.log10(values)  # clip in log10 space
    masked_array = sigma_clip(values, sigma=sigma_level, iters=None)

    # Get the number of masked values
    nmasked = np.sum(masked_array.mask)

    # Get the clipped list of values
    clipped = list(masked_array.compressed())

    # Return as list
    if return_nmasked: return clipped, nmasked
    else: return clipped
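# Illustrative usage of the wrapper above (made-up values; the wrapped astropy
# call uses ``iters``, so this assumes an older astropy). Note that with
# ``logarithmic=True`` the returned values stay in log10 space.
import numpy as np

values = list(np.random.lognormal(mean=1.0, sigma=0.5, size=500))
clipped = sigma_clip(values, sigma_level=3.0)
clipped_log, nmasked = sigma_clip(values, sigma_level=3.0,
                                  return_nmasked=True, logarithmic=True)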
Example #11
def cutoff(values, method, limit):

    """
    This function ...
    :param values:
    :param method:
    :param limit:
    """

    # Percentage method
    if method == "percentage":

        # Create a sorted list for the input values
        sorted_values = sorted(values)

        # Determine the splitting point
        split = (1.0-limit) * len(sorted_values)
        index = int(round(split))

        # Return the corresponding value in the sorted list
        return sorted_values[index]

    # Sigma-clipping method
    elif method == "sigma_clip":

        # Perform sigma clipping on the input list
        masked_values = sigma_clip(np.array(values), sigma=limit, iters=None, copy=False)

        # Calculate the maximum of the masked array
        return np.ma.max(masked_values)

    else: raise ValueError("Invalid cutoff method (must be 'percentage' or 'sigma_clip')")
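# Illustrative usage of ``cutoff`` (synthetic values; the sigma_clip branch
# assumes an astropy version that still accepts ``iters``):
import numpy as np

values = list(np.random.normal(10.0, 2.0, 1000))
upper_pct = cutoff(values, "percentage", 0.05)   # ~95th-percentile value
upper_sig = cutoff(values, "sigma_clip", 3.0)    # max after 3-sigma clipping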
Example #12
def linear_bias_kappa(kt, kp):
    kt = kt.ravel()
    kp = kp.ravel()
    kt = sigma_clip(kt, 8, 10)
    kp = kp[~kt.mask]
    kt = kt.compressed()
    print(kp.shape, kt.shape)
    bin = np.linspace(kp.min(), kp.max(), 30)
    kp_b, kp_be, kt_b, kt_be, N, B =  MyF.AvgQ(kp, kt, bin)
    N[N == 0] = 1
    kt_be /= np.sqrt(N)
    b_init = [1]
    kt_be[kt_be == 0] = 99.0
    chi2 = lambda b: np.sum(((kt_b - kp_b * b[0]) / kt_be)**2)
    bias = optimize.fmin(chi2, b_init)
    print('Bias ', 1 / bias)

    
    chi2 = lambda b: np.sum(((kt_b - kp_b * b) / kt_be)**2)
    m = minuit.Minuit(chi2)
    m.migrad()
    m.hesse()
    b = m.values['b']
    be = m.errors['b']
    bias = 1/b
    bias_e = be/b**2.
    print(r'b = %2.2f \pm %2.2f' % (bias, bias_e))
    return bias, bias_e
Example #13
    def clipped(self, compress=False, inplace=False):
        clip = sigma_clip(self, self.sigma)
        if compress:
            clip = clip.compressed()  # drop the masked entries entirely
        if not inplace:
            return clip
        self = clip  # note: rebinding ``self`` does not modify the caller's object
Example #14
def plot_normalized_lightcurve(picid, lc0, psc_dir):
    plt.figure(figsize=(12, 6))
    i = 0
    for color in 'rgb':
        (lc0.loc[lc0.color == color].target + i).plot(marker='o', color=color, alpha=0.5)
        (lc0.loc[lc0.color == color].reference + i).plot(marker='x', color=color, alpha=0.5)
        i += .3

    plt.title(f'Normalized Flux per channel (+ offset) - {picid}')
    plt.legend()

    plot_fn = os.path.join(psc_dir, f'normalized-flux-{picid}.png')
    plt.savefig(plot_fn)

    # Same channels, now plotting the sigma-clipped target/reference ratio
    plt.figure(figsize=(12, 6))
    i = 0
    for color in 'rgb':
        t0 = lc0.loc[lc0.color == color].target
        r0 = lc0.loc[lc0.color == color].reference
        f0 = sigma_clip(t0 / r0, sigma=3)
        plt.plot((f0 + i), marker='o', ls='', alpha=0.5, color=color)
        i += .1

    # plt.ylim([.9, 1.1])

    plt.title(f'Normalized Flux per channel (+ offset) - {picid}')
    plt.legend()

    plot_fn = os.path.join(psc_dir, f'normalized-flux-{picid}.png')
    plt.savefig(plot_fn)
Example #15
def masterfunc1(op):
	xax=np.arange(1.5,4.75,step=0.25)
	global allstackd1
	global allstackd2
	allstackd1=np.asarray(allstackd1)
	allstackd2=np.asarray(allstackd2)
	
	panel1,smooth1=highpassflist(allstackd1,op)
	panel2,smooth2=highpassflist(allstackd2,op)

	beforenan= np.argwhere(np.isnan(smooth2))
	beforenan =  collections.Counter([x for (x,y) in beforenan])

	panel4 = [sigma_clip(smooth2[t], sigma=5) for t in np.arange(0, len(xax))]

	noisef1 = np.ma.asarray(panel4)
	#panel3=np.ma.asarray(panel3)

	afternan = [np.ma.count_masked(p) for p in panel4]
	beforenan = [beforenan[t] for t in np.arange(0, len(beforenan))]
	toss = np.asarray(afternan) - np.asarray(beforenan)

	panel4 = [normstar(p) for p in panel4]
	panel4 = [p - 1 for p in panel4]
	panel4 = [np.sqrt(np.nanmean(np.square(p))) for p in panel4]

	panel3=toss/(ct*64.0)
	panels=[]
	panels.append(panel1)
	panels.append(panel2)
	panels.append(panel3)
	panels.append(panel4)
	return panels
Example #16
	def filtered_selection(self,obsDb,ii):
		# filter out bright sky values
		t = join(obsDb[ii],self.sky,'frameIndex')
		if len(t) < len(ii):
			raise ValueError('missing files!')
		#
		badfield = np.in1d(obsDb['objName'][ii],
		                   ['rm10','rm11','rm12','rm13'])
		badfield |= obsDb['fileName'][ii] == 'bokrm.20150405.0059'
		fvar = t['skyRms']**2/t['skyMean']
		badrms = sigma_clip(fvar,sigma=3.0,iters=2).mask
		keep = (t['skyMean'] < self.maxCounts) & ~badrms & ~badfield
		if keep.sum() < self.minNImg:
			return ~badrms & ~badfield
		#
		grpNum = 0
		pgrp = [grpNum]
		for _i in range(1,len(ii)):
			if obsDb['objName'][ii[_i]] != obsDb['objName'][ii[_i-1]]:
				grpNum += 1
			pgrp.append(grpNum)
		pgrp = np.array(pgrp)
		#
		jj = np.where(keep)[0]
		keep[jj] = False
		jj2 = t['skyMean'][jj].argsort()[:self.maxNImg*2]
		_,jj3 = np.unique(pgrp[jj[jj2]],return_index=True)
		jj4 = t['skyMean'][jj[jj2[jj3]]].argsort()[:self.maxNImg]
		keep[jj[jj2[jj3[jj4]]]] = True
		return keep
Example #17
def bin_every_n(x, start_idx, n=10, reduction_func=lambda x: np.mean(x, 0)):
    out = []
    if x.ndim == 1:
        x = x[:, None]
    for i in range(start_idx, x.shape[0], n):
        end_idx = min(i+n, x.shape[0])
        out.append(st.sigma_clip(x[i:end_idx, :], sigma=2.5, iters=1, axis=0).mean(0))
    return np.array(out)
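# Example call (synthetic data; assumes ``st`` is ``astropy.stats`` and an
# astropy version that still accepts ``iters``): the sigma-clipped mean of
# each block of 10 samples.
import numpy as np

y = np.random.normal(size=1000)
binned = bin_every_n(y, start_idx=0, n=10)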
Example #18
def masterfunc1(op):
	xax=[2.5]
	global allstackd1
	allstackd1=np.asarray(allstackd1)
	print('allstackd1', len(allstackd1))
	panel1,panel2,smooth1=highpassflist(allstackd1,op)


	'''

	panel1=map(normstar,flist1)
	panel1= map( lambda t: t-1, panel1)
	panel1=map(lambda t: np.sqrt(np.nanmean(np.square(t))), panel1)
	'''
	smooth1=np.asarray(smooth1)
	beforenan= np.argwhere(np.isnan(smooth1))
	beforenan=beforenan.flatten()
	panel3= sigma_clip(smooth1,sigma=5,cenfunc=np.nanmedian)
	p=np.ma.fix_invalid(panel3.data,panel3.mask,fill_value=0)
	tosstemp= [i for i, x in enumerate(panel3.mask) if x]
	panel3=np.asarray(panel3)
	for x in tosstemp:
		panel3[x]=np.nan
	afternan = np.ma.asarray(panel3)
	afternan = np.argwhere(np.isnan(afternan))
	afternan = afternan.flatten()
	'''
	smooth1=np.asarray(smooth1)
	beforenan= np.argwhere(np.isnan(smooth1))
	beforenan=beforenan.flatten()
	panel3= sigma_clip(smooth1,sigma=5)
	afternan = np.asarray(panel3)
	afternan = np.argwhere(np.isnan(afternan))
	afternan = afternan.flatten()
	'''
	print(beforenan)
	print(tosstemp)
	print('len of bef', len(beforenan))
	print('len of toss temp', len(tosstemp))
	
	toss = set(tosstemp) - set(beforenan)
	#beforenan= map(lambda t: beforenan[t], np.arange(0,len(beforenan)))
	#toss=np.asarray(afternan)-np.asarray(beforenan)
	#print toss
	
	'''
	panel3=map(normstar,panel3)

	panel3= map( lambda t: t-1, panel3)
	panel3=map(lambda t: np.sqrt(np.nanmean(np.square(t))), panel3)
	'''
	panel2=np.asarray(panel2)
	panels=[]
	panels.append(panel1)
	panels.append(panel2)
	panels.append(smooth1)
	panels.append(panel3)
	return panels,toss
Example #19
def plot_meds():
    

    filename_cat= 'DES0555-5957_i_cat.fits'
    filename_meds = 'DES0555-5957-i-meds-011.fits.fz'

    medsobj = meds.MEDS(filename_meds)
    sexcat = pyfits.getdata(filename_cat)

    medcat = medsobj._cat

    print('n_objects meds   ', len(medcat))
    print('n_objects sexcat ', len(sexcat))

    select = sexcat['FLUXERR_AUTO'] > 1e-3
    
    snr = sexcat['FLUX_AUTO'][select]/sexcat['FLUXERR_AUTO'][select]

    pl.figure()
    pl.hist(snr,bins=np.linspace(0,100))

    select = (snr<50) * (snr>49)

    high_snrs = np.nonzero(select)[0]
    print('len high_snrs', len(high_snrs))
 
    obj_id = high_snrs[2]
    # obj_id = 6

    print('ncutout', medsobj._cat['ncutout'][obj_id])
    print('obj_id', obj_id)

    mosaic = medsobj.get_mosaic(obj_id)
    pl.figure()
    pl.imshow(mosaic,interpolation='nearest')    

    pl.figure()
    pl.plot(mosaic.flatten())

    print('np.std(mosaic.flatten())', np.std(mosaic.flatten(), ddof=1))
    print('median_absolute_deviation', median_absolute_deviation(mosaic.flatten()))


 
    # old astropy sigma_clip API (``sig=`` keyword, two return values)
    clipped_mosaic, _ = sigma_clip(mosaic.flatten(), sig=2)
    print('np.std(clipped_mosaic)', np.std(clipped_mosaic, ddof=1))

    pl.figure()
    pl.plot(clipped_mosaic.flatten())

    pl.figure()
    pl.hist(mosaic.flatten(),bins=100)

    pl.show()



    import pdb; pdb.set_trace()
Example #20
def array_process(idata, sigcut, compress = False):
    '''Create the sigma-clipped data array.'''
    data = copy(idata)
    scut = np.clip(sigcut, 1.0, sigcut) #if we clip too low, this will never end

    good_data = sigma_clip(data, scut, iters=None)
    if compress:
        return good_data.compressed()
    return good_data
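# Illustrative usage of ``array_process`` (synthetic data; assumes the
# snippet's own imports, ``copy`` and ``sigma_clip``, are in scope, and an
# astropy version that still accepts ``iters=None``):
import numpy as np

raw = np.random.normal(100.0, 15.0, 5000)
clean = array_process(raw, 3.0, compress=True)   # plain ndarray, outliers gone
masked = array_process(raw, 3.0)                 # masked array, same shape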
Example #21
def group_clip(data, v0):
    """ Remove galaxies that are not members. """
    idx = np.where(np.abs(data[3] - v0) < 1500)[0]
    d = tuple([x[idx] for x in data])
    newdata = sigma_clip(d[3], sigma=3, cenfunc=np.median,
                               stdfunc=funcs.biweight_midvariance,
                               copy=True, iters=10)
    mask = ~newdata.mask
    return tuple([x[mask] for x in d])
Example #22
def average_background(bkg_list, sigma, maxiters):

    """
    Average multiple background exposures into a combined data model

    Parameters:
    -----------

    bkg_list: filename list
        List of background exposure file names

    sigma: float
        Clipping threshold, in sigma

    maxiters: int or None
        Maximum number of sigma-clipping iterations

    Returns:
    --------

    avg_bkg: data model
        The averaged background exposure

    """

    num_bkg = len(bkg_list)
    avg_bkg = None
    cdata = None

    # Loop over the images to be used as background
    for i, bkg_file in enumerate(bkg_list):
        log.debug(' Accumulate bkg from {}'.format(bkg_file))
        bkg_model = datamodels.ImageModel(bkg_file)

        # Initialize the avg_bkg model, if necessary
        if avg_bkg is None:
            avg_bkg = datamodels.ImageModel(bkg_model.shape)

        if cdata is None:
            cdata = np.zeros(((num_bkg,) + bkg_model.shape))
            cerr = cdata.copy()

        # Accumulate the data from this background image
        cdata[i] = bkg_model.data
        cerr[i] = bkg_model.err * bkg_model.err
        avg_bkg.dq = np.bitwise_or(avg_bkg.dq, bkg_model.dq)

        bkg_model.close()

    # Clip the background data
    log.debug(' clip with sigma={} maxiters={}'.format(sigma, maxiters))
    mdata = sigma_clip(cdata, sigma=sigma, maxiters=maxiters, axis=0)

    # Compute the mean of the non-clipped values
    avg_bkg.data = mdata.mean(axis=0).data

    # Mask the ERR values using the data mask
    merr = np.ma.masked_array(cerr, mask=mdata.mask)

    # Compute the combined ERR as the uncertainty in the mean
    avg_bkg.err = np.sqrt(merr.sum(axis=0)) / (num_bkg - merr.mask.sum(axis=0))

    return avg_bkg
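# A condensed, synthetic sketch of the combination above: mean of the
# non-clipped values per pixel, with the propagated uncertainty in the mean
# (same modern astropy keywords as the function itself).
import numpy as np
from astropy.stats import sigma_clip

stack = np.random.normal(1.0, 0.1, size=(5, 64, 64))  # 5 background images
err2 = np.full_like(stack, 0.01)                      # per-image variance
mdata = sigma_clip(stack, sigma=3.0, maxiters=5, axis=0)
avg = mdata.mean(axis=0).data
merr = np.ma.masked_array(err2, mask=mdata.mask)
n_used = stack.shape[0] - merr.mask.sum(axis=0)
avg_err = np.sqrt(merr.sum(axis=0)) / n_used          # uncertainty in the mean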
Example #23
def stack_spectra(df, colwave='wave', colf='fnu', colfu='fnu_u', colmask=[], output_wave_array=[], pre='f', sigmaclip=3) :
    ''' General-purpose function to stack spectra.  Rebins wavelength.
    Does not de-redshift spectra.  If you want to stack in rest frame, run jrr.spec.convert2restframe_df(df) beforehand.
    Any normalization by continuum should be done beforehand.
    Input df{} is a dictionary of pandas data frames that contains the spectra.
    colwave, colf, colfu, colmask, tell where to find the columns for wavelength, flux/flam/fnu, uncertainty, & input mask.
    colmask is a column in dataframe of values to mask (True=masked)
    Output wavelength array will be output_wave_array if it is supplied; else will use 1st spectrum in df{}.
    '''
    if len(output_wave_array) :
        print("Caution: overriding the default wavelength range and dispersion!")
        stacked = pandas.DataFrame(data=output_wave_array, columns=(colwave,))
    else :
        stacked = pandas.DataFrame(data=df[list(df.keys())[0]][colwave])  # Get output wavelength array from first spectrum
    nbins = stacked.shape[0]  #N of pixels
    nspectra = len(df)
    nf    =   np.ma.zeros(shape=(nspectra, nbins))   # temp array that will hold the input spectra
    nf_u  =   np.ma.zeros(shape=(nspectra, nbins))   # using numpy masked arrays so can ignore nans from rebin_spec_new
    for ii, spec in enumerate(iter(df.values())):   # Rebin each spectrum (spec), and load all spectra into big fat arrays.
        ma_spec = spec     # masked version of spectrum
        if colmask :
            ma_spec.loc[ma_spec[colmask], colf]  = np.nan    # try setting to nan to solve the gap problem
            #ma_spec.loc[ma_spec[colmask], colfu] = ma_spec[colf].median() * 1E6  # set this huge
        nf[ii]   = np.ma.masked_invalid(rebin_spec_new(ma_spec[colwave], ma_spec[colf],  stacked[colwave], return_masked=True)) # fnu/flam rebinned
        nf_u[ii] = np.ma.masked_invalid(rebin_spec_new(ma_spec[colwave], ma_spec[colfu], stacked[colwave], return_masked=True))  # uncertainty on above
    nf[ :,  0:2].mask = True  # mask the first 2 and last 2 pixels  of each spectrum
    nf[ :, -2:].mask = True
    ## NEED TO HAVE IT USE THE COLMASK, IF DEFINED.
        # Is rebinning handling the uncertainties correctly?
    stacked[pre+'sum']    = np.ma.sum(nf, axis=0)
    stacked[pre+'sum_u']  = util.add_in_quad(nf_u, axis=0)
    stacked[pre+'avg']    = np.ma.average(nf, axis=0)
    stacked[pre+'avg_u']  = stacked[pre+'sum_u'] /  np.ma.MaskedArray.count(nf, axis=0)
    weights = nf_u ** -2              # compute the weighted avg
    (stacked[pre+'weightavg'], sumweight) = np.ma.average(nf, axis=0, weights=weights, returned=True) # weighted avg
    stacked[pre+'weightavg_u'] =  sumweight**-0.5
    nf_clip  = sigma_clip(nf, sig=sigmaclip, iters=None, axis=0)
    stacked[pre+'clipavg'], sumweight2   = np.ma.average(nf_clip, axis=0, weights=weights, returned=True)
    stacked[pre+'clipavg_u'] = sumweight2**-0.5   
    stacked[pre+'median']   = np.ma.median(nf, axis=0)
    stacked[pre+'medianxN'] = np.ma.median(nf, axis=0) * np.ma.MaskedArray.count(nf, axis=0) 
    stacked['Ngal'] = np.ma.MaskedArray.count(nf, axis=0)  # How many spectra contribute to each wavelength
    
    # compute the jackknife variance.  Adapt from mage_stack_redo.py.
    jackknife= np.ma.zeros(shape=(nspectra, nbins)) # The goal.
    jack_var = np.ma.zeros(shape=nbins)
    for ii in range(0, nspectra) :
        jnf = nf.copy()
        #print "DEBUGGING Jackknife, dropping ", ii,  "from the stack"
        jnf[ii, :].mask = True  # Mask one spectrum
        jackknife[ii], weight = np.ma.average(jnf, axis=0, weights=weights, returned=True)  # all the work is done here.
        jack_var = jack_var +  (jackknife[ii] - stacked[pre+'weightavg'])**2
    jack_var *= ((nspectra -1.0)/float(nspectra))   
    jack_std = np.sqrt(jack_var)
    stacked[pre+'jack_std'] = jack_std
    return(stacked, nf, nf_u)
Example #24
def tossoutframe(allstackd):
	#5 sigmaclip of x0,y0,F
	allstackd=np.asarray(allstackd)
	temp=np.apply_along_axis(lambda m: convolve(m, Box1DKernel(50)), axis=0, arr=allstackd)
	#print temp[4,13:18,13:18]
	# Smoothing
	allstackd-=temp
	image_data1= sigma_clipping(temp)
	image_data2= bgsubtract(image_data1)
	cx, cy = centroid(image_data2)
	cx=sigma_clip(cx,sigma=5)
	cy=sigma_clip(cy,sigma=5)
	xax=np.arange(1.5,4.75,step=0.25)
	flist = [sigma_clip(aperphot(image_data2, t, cx, cy), sigma=5) for t in xax]
	flist = np.asarray(flist)
	flist = [normstar(f) for f in flist]
	flist = [f - 1 for f in flist]
	flist = [np.sqrt(np.nanmean(np.square(f))) for f in flist]
	return flist
Example #25
def reject_outliers(rmcal,simdat,**kwargs):
	sig = kwargs.get('reject_sig',3.0)
	iters = kwargs.get('reject_niter',2)
	for i,obj in enumerate(rmcal):
		mags,errs = rmcal.get_object_phot(obj)
		clipped = sigma_clip(mags,sig=sig,iters=iters)
# need a verbose argument
#		if clipped.mask.sum() > mags.mask.sum():
#			print 'object %d rejected %d' % (i,(clipped.mask&~mags.mask).sum())
		obj.update_mask(clipped.mask)
Example #26
    def remove_outliers(self, sigma=5):
        fmodel = self.flux_model(self.de.minimum_location)[0]
        times, fluxes, pbids, errors = [], [], [], []
        for i in range(len(self.times)):
            res = self.fluxes[i] - fmodel[i]
            mask = ~sigma_clip(res, sigma=sigma).mask
            times.append(self.times[i][mask])
            fluxes.append(self.fluxes[i][mask])
            if self.errors is not None:
                errors.append(self.errors[i][mask])
        self._init_data(times, fluxes, self.pbids, (errors if self.errors is not None else None))
Example #27
    def __init__(self, filenames, hdu_num):
        self.filenames = filenames
        self.nfiles = len(filenames)
        self.hdu_num = hdu_num
        self.images = []
        for filename in filenames:
            img = RawImage(filename, hdu_num)
            img.subtract_overscan()
            self.images.append(img)

        nx, ny = self.images[0].data.shape
        cube = np.zeros((self.nfiles, nx, ny))
        cube_weights = np.zeros_like(cube)
        for j in range(self.nfiles):
            cube[j, :, :] = self.images[j].data
            cube_weights[j, :, :] = self.images[j].data_err ** -2.0

        cubeclipped = stats.sigma_clip(cube, sig=3.0, cenfunc=np.mean, axis=0)

        # Compute the weighted mean for each pixel in the stack, using
        # only those weights for counts that weren't clipped.
        cubemean = np.ma.average(cubeclipped, axis=0, weights=cube_weights)
        # In the very unlikely (impossible?) event that all pixels in the
        # stack were masked, assume zero counts.
        cubemean = np.ma.filled(cubemean, 0.0)
        self.data = cubemean

        # Use pairwise differences of overscan-subtracted biases to estimate
        # the error.
        rmses = []
        for i in range(0, self.nfiles - 1, 2):
            diff = (
                self.images[i + 1].data[self.images[i + 1].py_datasec] - self.images[i].data[self.images[i].py_datasec]
            )
            mn = np.mean(diff)
            rms = np.sqrt(np.mean((diff - mn) ** 2.0))
            rmses.append(rms)
        mean_rms = np.mean(rmses)
        self.data_err = np.full_like(self.data, mean_rms)
        print "hdu %d, bias diff rms %f, scatter %f" % (
            hdu_num,
            mean_rms,
            np.sqrt(np.mean((np.array(rmses) - mean_rms) ** 2.0)),
        )

        # Mask those pixels whose rms in the stack of unclipped pixels is
        # greater than five times the rms measured from pairwise differences.
        rms_arr = np.ma.average((cubeclipped - cubemean) ** 2.0, axis=0, weights=cube_weights)
        rms_arr = np.sqrt(np.ma.filled(rms_arr, 0.0))

        pixmask = np.zeros_like(cubemean, dtype=int)
        bad_pix = ((rms_arr == 0.0) | (rms_arr > 5.0 * mean_rms)) & (pixmask == 0)
        pixmask[bad_pix] = 1
        self.data_mask = pixmask
Example #28
def sigmaclip_limitsig(data,error=None,sig_limit=None,**kwd):
	# sigma clipping with limiting sigma 	
	data=np.array(data)
	mdata=ast.sigma_clip(data,**kwd)
	if sig_limit is not None:
		while(True):
			med=np.ma.median(mdata)
			sig=np.ma.std(mdata)
			if sig < sig_limit: break
			index=np.ma.argmax(np.ma.abs(mdata-med))
			mdata.mask[index]=True
	return mdata
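# Illustrative usage of ``sigmaclip_limitsig`` (synthetic values): keep
# removing the most deviant point until the clipped standard deviation drops
# below ``sig_limit``; extra keywords are forwarded to astropy's sigma_clip.
import numpy as np

vals = np.random.normal(0.0, 0.5, 300)
clipped = sigmaclip_limitsig(vals, sig_limit=0.2, sigma=3.0)
print(np.ma.std(clipped))  # below 0.2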
Example #29
    def lc_plot_diff_mag(self, result_file_path=None,
                         best_comparison_star=None,
                         mark_color="blue",
                         bar_color="red"):

        print("Plotting asteroid's LC...")

        fn = os.path.basename(result_file_path).split('.')[0]

        # Two subplots, the axes array is 1-d
        # Plotting settings
        rcParams['figure.figsize'] = [10., 8.]

        lc_ast_diff = plt.figure()
        gs = gridspec.GridSpec(2, 1, height_ratios=[6, 2])

        results = self.find_best_comp(result_file_path=result_file_path,
                                      best_comparison_star=best_comparison_star)['with_mean_comp']

        filtered_jd_vs_mag_diff = sigma_clip(results['t-c'],
                                         sigma=3,
                                         iters=10, stdfunc=mad_std)

        # use only not rejected data (because umask used)
        filtered_diff_umask = np.logical_not(filtered_jd_vs_mag_diff.mask)

        # jd vs magt - magi
        lc = lc_ast_diff.add_subplot(gs[0])
        lc.set_title(fn)
        lc.grid(True)
        lc.invert_yaxis()
        lc.set_xlabel("$JD$", fontsize=12)
        lc.set_ylabel("Diff Mag. ({0} - {1})".format(
            results['ast_num'][0],
            results['nomad1'][0]),
            fontsize=12)
        # Plotting settings

        lc.errorbar(
            results['jd'][filtered_diff_umask],
            results['t-c'][filtered_diff_umask],
            yerr=results['t-c-err'][filtered_diff_umask],
            fmt='o',
            ecolor=bar_color,
            color=mark_color,
            capsize=5,
            elinewidth=2,
            label='{0} - {1}'.format(fn, results['nomad1'][0]))

        lc.legend(loc=2, numpoints=1)
        lc_ast_diff.savefig("{0}/{1}_jd_vs_diff_mag_lc.pdf".format(os.getcwd(), fn))
Example #30
def measure_image(image, wedge, clip_sigma=None):
    """Measure an image with the given wedge definition.

    Parameters
    ----------
    image : ndarray
        The image to measure. It should be consistent with the image shape
        expected by the :class:`WedgeBins` definition.
    wedge : :class:`WedgeBins`
        The binning definition.
    clip_sigma : float
        If set, this is the Gaussian standard deviation to apply sigma-clipping
        against.

    Returns
    -------
    profile : ndarray
        A `numpy` structured array with the following fields:

        - `area`, the area is square arcseconds
        - `R`, the radial distance at the bin mid-point, in arcseconds
        - `R_inner`, the radial distance at the inside edge, in arcseconds
        - `R_outer`, the radial distance at the outside edge, in arcseconds
        - `median`, the median pixel intensity in the bin
        - `sigma`, the standard deviation of pixel intensities
    """
    dt = [('area', float), ('R', float), ('R_inner', float),
          ('R_outer', float), ('median', float), ('sigma', float)]
    profile = np.zeros(wedge.n_bins, dtype=np.dtype(dt))
    for i, b in enumerate(wedge):
        x1, x2 = b['xlim']
        y1, y2 = b['ylim']
        pixels = image[y1:y2, x1:x2].ravel()
        good = np.where(np.isfinite(pixels))[0]
        pixels = pixels[good]
        if clip_sigma is not None:
            # Perform sigma clipping
            filtered = sigma_clip(pixels, sig=clip_sigma, iters=1)
            good_only = filtered.data[~filtered.mask]
            median = np.median(good_only)
            sigma = np.std(good_only)
        else:
            # No sigma clipping
            median = np.median(pixels)
            sigma = np.std(pixels)
        profile[i]['area'] = b['area']
        profile[i]['R'] = b['r_mid']
        profile[i]['R_inner'] = b['r_inner']
        profile[i]['R_outer'] = b['r_outer']
        profile[i]['median'] = median
        profile[i]['sigma'] = sigma
    return profile
Example #31
fileids = np.array(range(len(ckpt)))[ckpt[:, 1] == '0']

nf = len(ckpt)
outfile = outdir + "/" + outname
for i, f in zip(fileids, files):
    ticid = f.split("-")[2][-9:]
    camp = f.split("-")[1][-2:]
    print("computing {0} of {1} lightcurves\n TIC {2}".format(
        i + 1, nf, ticid))
    summaryfile = ticid + "_" + camp + "_summary.png"
    cornerfile = ticid + "_" + camp + "_corner.png"
    errfile = ticid + "_" + camp + "_err.dat"
    lc = Lightcurve.tess(f)
    try:
        y = lc.flux - utils.trend(lc.t, lc.flux, 2)
        mask = sigma_clip(y - medfilt(y, kernel_size=301), sigma=3)
        x = lc.t[~mask.mask]
        y = y[~mask.mask]
        yerr = np.std(y - medfilt(y, kernel_size=51))
        x = x[::5]
        y = y[::5]

        with pm.Model() as model:

            mean = pm.Normal("mean", mu=np.mean(y), sd=np.std(y))
            yerr = pm.Normal("yerr", mu=yerr, sd=5.0)
            logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5)
            period = utils.periodprior(lc.t, lc.flux, min_period=0.1)
            logQ0 = pm.Uniform("logQ0", lower=-10, upper=10)
            logdQ = pm.Normal("logdQ", mu=2.0, sd=5.0)
            mix = pm.Uniform("mix", lower=0.0, upper=1.0)
Example #32
def meas_sig_old(time, flux, weights, plot_q=False):

    from astropy.stats import sigma_clip

    df = np.diff(flux)

    x = np.copy(time[:-1])  #np.arange(len(df))

    dff1 = csaps.UnivariateCubicSmoothingSpline(
        x,
        df,
        smooth=0.1,
        weights=weights[:-1],
    )(x)

    #     print('dff1', dff1)

    dfn = df - dff1

    dfnf = np.copy(dfn)

    #     plt.figure(figsize=(10, 4))
    #     plt.plot(x, df, '.k', label='Orig dF')
    #     plt.plot(x, dff1, '-c', label='dF fit 1')

    #     plt.figure(figsize=(10, 4))
    #     plt.plot(x, dfn, '.k', label='Orig dF')

    for sd in [5]:

        dfnf = sigma_clip(dfnf, sigma=sd, maxiters=2)

    __, gi, __ = np.intersect1d(dfn, dfnf, return_indices=1)

    gi = np.sort(gi)

    #     plt.figure(figsize=(10, 4))
    #     plt.plot(x[gi], dfn[gi], '.k', label='Orig dF')

    #     try:

    dff = csaps.UnivariateCubicSmoothingSpline(
        x[gi],
        df[gi],
        smooth=0.1,
        weights=weights[:-1][gi],
    )(x)
    #     except:

    #         print('FAIL', len(x), len(gi))

    #         #FAIL 271 270 36338 -36608
    #         #FAIL 271 270 36338 -36608

    #         return

    dfn = df - dff

    if plot_q:
        plt.figure(figsize=(10, 4))
        plt.plot(x, df, '.k', label='Orig dF')
        plt.plot(x, dff1, '-c', label='dF fit 1')
        plt.plot(x, dff, '-m', label='dF fit 2')
        plt.legend()

    for sd in [5, 3]:

        dfn = sigma_clip(dfn, sigma=sd, maxiters=5)

    if plot_q:
        plt.figure(figsize=(10, 7))
        plt.title('RMS = ' + str(np.std(dfn)))
        plt.plot(x, dfn, '.k')

    return np.std(dfn)
Example #33
def eden_GPDetrend(telescope, datafolder, targets, calibrated=True):
    # define constants from config.ini
    config = ConfigParser()
    config.read('config.ini')
    server_destination = config['FOLDER OPTIONS']['server_destination']

    # create GPDetrend inputs, and run GPDetrend

    # create folder in post_processing for GPLC files
    if calibrated:
        out_dir = datafolder + 'calib_post_processing/' + 'GPLC/'
    else:
        out_dir = datafolder + 'post_processing/' + 'GPLC/'
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    # Load necessary files to create GPDetrend inputs
    nflux = np.genfromtxt(datafolder + 'post_processing/' + targets[0] +
                          '.dat')
    LC = np.genfromtxt(datafolder + 'post_processing/LC/' + targets[0] +
                       '.epdlc')
    comps = glob.glob(datafolder + 'post_processing/comp_light_curves/*')

    # Store information
    # lc file
    times = nflux[:, 0]
    flux = nflux[:, 1]

    # sigma clip flux
    filt = sigma_clip(flux, sigma=5)
    filt = np.invert(filt.mask)

    # eparams file
    etime = LC[:, 1]
    airmass = LC[:, 22]
    FWHM = LC[:, 19]
    cen_X = LC[:, 15]
    cen_Y = LC[:, 16]
    bg = LC[:, 17]
    # for some reason the eparam times do not always match the flux times, this (usually) finds and removes the extras
    if len(times) != len(airmass):
        print('LC length does not match comps and eparams length!')
        print('Time length: ', len(times), 'eparams length: ', len(airmass))

        # rounding to 5 gets rid of small differences
        times = np.round(times, decimals=5)
        etime = np.round(etime, decimals=5)
        mask = np.in1d(etime, times)  # find values truly not in lc

        airmass = airmass[mask]
        FWHM = FWHM[mask]
        cen_X = cen_X[mask]
        cen_Y = cen_Y[mask]
        bg = bg[mask]

    # comps file
    cflux = np.zeros((len(times), int(len(comps) / 4)))
    count = 0
    for j in range(len(comps)):
        if all(s not in comps[j] for s in ('pdf', 'sigma', 'norm')):
            try:  # does not always work
                comp = np.genfromtxt(comps[j])
                if len(times) != len(airmass):
                    comp = comp[mask]
                cflux[:, count] = comp[:, 1]
                count = count + 1
            except Exception:
                pass

    # sigma mask
    times, flux = times[filt], flux[filt]
    airmass, FWHM, cen_X, cen_Y, bg = airmass[filt], FWHM[filt], cen_X[
        filt], cen_Y[filt], bg[filt]
    cflux = cflux[filt, :]

    # Write the GPDetrend input files
    # array format
    light = np.array([times, flux, np.zeros(len(times))])
    eparams = np.array([times, airmass, FWHM, bg, cen_X, cen_Y], dtype='float')

    # the FWHM often contains nans, this removes those times from all files.
    rem = np.where(np.isnan(FWHM))

    # Remove times with FWHM Nans and transpose
    eparams = np.delete(np.transpose(eparams), rem, axis=0)
    light = np.delete(np.transpose(light), rem, axis=0)
    cflux = np.delete(cflux, rem, axis=0)

    # write
    lfile = out_dir + 'lc.dat'
    efile = out_dir + 'eparams.dat'
    cfile = out_dir + 'comps.dat'
    ofolder = 'detrend_'

    np.savetxt(lfile, light, fmt='%1.6f', delimiter='       ')
    np.savetxt(cfile, cflux, fmt='%1.6f', delimiter='       ')
    np.savetxt(efile,
               eparams,
               fmt='%1.6f',
               delimiter='       ',
               header='times, airmass, fwhm, background, x_cen, y_cen')

    # RUN DETREND
    # changing directories seems necessary, otherwise the out folder is too long a name for multinest
    mycwd = os.getcwd()
    os.chdir(out_dir)
    os.system('python ' + mycwd + '/GPDetrend.py -ofolder ' + ofolder +
              ' -lcfile ' + lfile + ' -eparamfile ' + efile + ' -compfile ' +
              cfile + ' -eparamtouse  all')
    os.chdir(mycwd)
Example #34
def centroid_FWM(image_data,
                 xo=[],
                 yo=[],
                 wx=[],
                 wy=[],
                 scale=1,
                 bounds=(13, 18, 13, 18)):
    '''
	Gets the centroid of the target by flux weighted mean and the PSF width
	of the target.

	Parameters:
	-----------

	    image_data : 3D array
	    	Data cube of images (2D arrays of pixel values).

	    xo        : list (optional)
	    	List of x-centroid obtained previously. Default if none given is an 
	    	empty list.

	    yo        : list (optional)
	    	List of y-centroids obtained previously. Default if none given is an 
	    	empty list.

	    wx        : list (optional)
	    	List of PSF width (x-axis) obtained previously. Default if none given 
	    	is an empty list.

	    wy        : list (optional)
	    	List of PSF widths (y-axis) obtained previously. Default if none given
	    	is an empty list.

	    scale     : int (optional)
	    	If the image is oversampled, scaling factor for centroid and bounds, 
	    	i.e, give centroid in terms of the pixel value of the initial image.

		bounds    : tuple (optional)
			Bounds of box around the target to exclude background. Default is (13, 18, 13, 18).
    
    Returns:
    --------

	    xo        : list
	    	Updated list of x-centroid obtained previously.

	    yo        : list
	    	Updated list of y-centroids obtained previously.

	    wx        : list
	    	Updated list of PSF width (x-axis) obtained previously.

	    wy        : list
	    	Updated list of PSF widths (y-axis) obtained previously.
	'''
    lbx, ubx, lby, uby = bounds
    lbx, ubx, lby, uby = lbx * scale, ubx * scale, lby * scale, uby * scale
    starbox = image_data[:, lbx:ubx, lby:uby]
    h, w, l = starbox.shape
    # get centroid
    X, Y = np.mgrid[:w, :l]
    cx = (np.sum(np.sum(X * starbox, axis=1), axis=1) /
          (np.sum(np.sum(starbox, axis=1), axis=1))) + lbx
    cy = (np.sum(np.sum(Y * starbox, axis=1), axis=1) /
          (np.sum(np.sum(starbox, axis=1), axis=1))) + lby
    cx = sigma_clip(cx, sigma=4, iters=2, cenfunc=np.ma.median)
    cy = sigma_clip(cy, sigma=4, iters=2, cenfunc=np.ma.median)
    xo.extend(cx / scale)
    yo.extend(cy / scale)
    # get PSF widths
    X, Y = np.repeat(X[np.newaxis, :, :], h,
                     axis=0), np.repeat(Y[np.newaxis, :, :], h, axis=0)
    cx, cy = np.reshape(cx, (h, 1, 1)), np.reshape(cy, (h, 1, 1))
    X2, Y2 = (X + lbx - cx)**2, (Y + lby - cy)**2
    widx = np.sqrt(
        np.sum(np.sum(X2 * starbox, axis=1), axis=1) /
        (np.sum(np.sum(starbox, axis=1), axis=1)))
    widy = np.sqrt(
        np.sum(np.sum(Y2 * starbox, axis=1), axis=1) /
        (np.sum(np.sum(starbox, axis=1), axis=1)))
    widx = sigma_clip(widx, sigma=4, iters=2, cenfunc=np.ma.median)
    widy = sigma_clip(widy, sigma=4, iters=2, cenfunc=np.ma.median)
    wx.extend(widx / scale)
    wy.extend(widy / scale)
    return xo, yo, wx, wy
Example #35
    campaign_no = campaign_no[:-31]
    print("CAMPAIGN " + str(campaign_no))

    # normalize to 0.0
    flux = [i - 1 for i in flux]

    #print('flux = ' + str(type(flux)))

    flux = np.asarray(flux)
    time = np.asarray(time)

    #print(type(flux))

    # SIGMA CLIPPING
    flux = sigma_clip(flux, sigma=3, iters=1)

    # uncomment if extra time stamp
    #time.pop()

    period = np.random.uniform(low=1, high=26)
    #period = 8.0
    tested_period.append(period)
    print('Inj period = ' + str(period))

    #rprs = np.random.choice(np.arange(.1, .9, .1), 1)
    rprs = np.random.uniform(low=.05, high=.95)
    #rprs = .6
Example #36
def cube_detect_badfr_ellipticity(array, fwhm, crop_size=30, roundlo=-0.2,
                                  roundhi=0.2, plot=True, verbose=True):
    """ Returns the list of bad frames  from a cube by measuring the PSF 
    ellipticity of the central source. Should be applied on a recentered cube.
    
    Parameters
    ----------
    array : numpy ndarray 
        Input 3d array, cube.
    fwhm : float
        FWHM size in pixels.
    crop_size : int, optional
        Size in pixels of the square subframe to be analyzed.
    roundlo, roundhi : float, optional
        Lower and higher bounds for the ellipticity. See ``Notes`` below for
        details.
    plot : bool, optional
        If true it plots the central PSF roundness for each frame.
    verbose : bool, optional
        Whether to print to stdout or not.
        
    Returns
    -------
    good_index_list : numpy ndarray
        1d array of good indices.
    bad_index_list : numpy ndarray
        1d array of bad frames indices.
    
    Notes
    -----
    From photutils.DAOStarFinder documentation:
    DAOFIND calculates the object roundness using two methods. The 'roundlo'
    and 'roundhi' bounds are applied to both measures of roundness. The first
    method ('roundness1'; called 'SROUND' in DAOFIND) is based on the source 
    symmetry and is the ratio of a measure of the object's bilateral (2-fold) 
    to four-fold symmetry. The second roundness statistic ('roundness2'; called 
    'GROUND' in DAOFIND) measures the ratio of the difference in the height of
    the best fitting Gaussian function in x minus the best fitting Gaussian 
    function in y, divided by the average of the best fitting Gaussian 
    functions in x and y. A circular source will have a zero roundness. A source
    extended in x or y will have a negative or positive roundness, respectively.
    
    """
    from .cosmetics import cube_crop_frames

    check_array(array, 3, msg='array')
    
    if verbose:
        start_time = time_ini()

    array = cube_crop_frames(array, crop_size, verbose=False)
    n = array.shape[0]
    goodfr = []
    badfr = []
    roundness1 = []
    roundness2 = []
    for i in range(n):
        ff_clipped = sigma_clip(array[i], sigma=3, maxiters=None)
        thr = ff_clipped.max()
        DAOFIND = DAOStarFinder(threshold=thr, fwhm=fwhm)
        tbl = DAOFIND.find_stars(array[i])
        table_mask = (tbl['peak'] == tbl['peak'].max())
        tbl = tbl[table_mask]
        roun1 = tbl['roundness1'][0]
        roun2 = tbl['roundness2'][0]
        roundness1.append(roun1)
        roundness2.append(roun2)
        # we check the roundness
        if roundhi > roun1 > roundlo and roundhi > roun2 > roundlo:
            goodfr.append(i)
        else:
            badfr.append(i)
    
    bad_index_list = np.array(badfr)
    good_index_list = np.array(goodfr)

    if plot:
        _, ax = plt.subplots(figsize=vip_figsize)
        x = range(len(roundness1))
        if n > 5000:
            marker = ','
        else:
            marker = 'o'
        ax.plot(x, roundness1, '-', alpha=0.6, color='#1f77b4',
                label='roundness1')
        ax.plot(x, roundness1, marker=marker, alpha=0.4, color='#1f77b4')
        ax.plot(x, roundness2, '-', alpha=0.6, color='#9467bd',
                label='roundness2')
        ax.plot(x, roundness2, marker=marker, alpha=0.4, color='#9467bd')
        ax.hlines(roundlo, xmin=-1, xmax=n + 1, lw=2, colors='#ff7f0e',
                  linestyles='dashed', label='roundlo', alpha=0.6)
        ax.hlines(roundhi, xmin=-1, xmax=n + 1, lw=2, colors='#ff7f0e',
                  linestyles='dashdot', label='roundhi', alpha=0.6)
        plt.xlabel('Frame number')
        plt.ylabel('Roundness')
        plt.xlim(xmin=-1, xmax=n + 1)
        plt.legend(fancybox=True, framealpha=0.5, loc='best')
        plt.grid('on', alpha=0.2)

    if verbose:
        bad = len(bad_index_list)
        percent_bad_frames = (bad*100)/n
        msg1 = "Done detecting bad frames from cube: {} out of {} ({:.3}%)"
        print(msg1.format(bad, n, percent_bad_frames))
        timing(start_time)
    
    return good_index_list, bad_index_list
Example #37
def test_median(median):
    '''Test median image.'''

    save_figs = True

    # create fake data for subpixel dither 1 to test median
    with fits.open("V54321001002P0000000001101_A1_F150W_cal.fits") as h:
        h['SCI'].data[:,:] = 3.0
        h['DQ'].data[:,:] = 0
        h.writeto("V54321001002P0000000001101_A1_F150W_cal_mediantest.fits",overwrite=True)


    # create fake data for subpixel dither 2 to test median
    with fits.open("V54321001002P0000000001102_A1_F150W_cal.fits") as h:
        h['SCI'].data[:,:] = 5.0
        h['DQ'].data[:,:] = 0
        h.writeto("V54321001002P0000000001102_A1_F150W_cal_mediantest.fits",overwrite=True)


    # run Image3 pipeline to get outlier detection outputs
    im3 = calwebb_image3.Image3Pipeline(config_file='calwebb_image3.cfg')
    im3.tweakreg.skip = True
    im3.resample.blendheaders = False
    im3.outlier_detection.save_intermediate_results = True
    im3.run(median)

    # get file names and bases to load output data
    input_files = []
    with open(median) as json_data:
         d = json.load(json_data)
    members = d['products'][0]['members']
    base = members[0]['expname'][1:8]
    output_files = glob("jw"+base+"*01101_00001_outlier_i2d.fits") + \
                   glob("jw"+base+"*01102_00001_outlier_i2d.fits")

    input_file_base = members[0]['expname']

    # put results into array
    with fits.open(output_files[0]) as h:
        shape = h[1].data.shape
    all_dithers = np.zeros((len(output_files),shape[0],shape[1]),dtype='float32')
    for i in np.arange(0,len(all_dithers)):
        print(np.shape(fits.getdata(output_files[i],1)))
        all_dithers[i,:,:] = fits.getdata(output_files[i],1)

    # get output median image
    first = input_file_base
    cal = first.find("_F")
    median_file = first[:cal]+"_median.fits"
    median_image = fits.getdata(median_file,1)

    # manually calculate the median
    calc_median_array = np.nanmedian(all_dithers,axis=0)
    diff_array = np.abs(calc_median_array - median_image)

    # sigma-clipping
    clip = sigma_clip(diff_array)
    clip.data[clip.mask] = np.nan
    diff_mean = np.nanmean(clip.data)

    if save_figs:

        # save figure to show median
        fig, ax = plt.subplots(1,1,figsize=(10,10))
        plt.ylabel('y pixels',fontsize=15)
        plt.xlabel('x pixels',fontsize=15)
        plt.imshow(median_image, vmin=3.0, vmax=5.0, cmap=plt.cm.gray, origin='lower')
        ax.set_title("Median image",fontsize=12)
        plt.colorbar(orientation='horizontal',pad=0.09)
        plt.savefig(median[:5]+"_median_image.png")

        # save figure to show difference between calculated and output medians
        fig, ax = plt.subplots(1,1,figsize=(10,10))
        plt.ylabel('y pixels',fontsize=15)
        plt.xlabel('x pixels',fontsize=15)
        plt.imshow(diff_array, vmin=-0.5, vmax=0.5, cmap=plt.cm.gray, origin='lower')
        ax.set_title("Difference between median image and manually calculated median",fontsize=12)
        plt.colorbar(orientation='horizontal',pad=0.09)
        plt.savefig(median[:5]+"_median_diff_image.png")

    assert np.isclose(diff_mean, 0.0)
Example #38
def tls_search(time,
               flux,
               flux_err,
               known_transits=None,
               tls_kwargs=None,
               wotan_kwargs=None,
               options=None):
    '''
    Summary:
    -------
    This runs TLS on these data with the given infos
    
    Inputs:
    -------
    time : array of float
        time stamps of observations
    flux : array of float
        normalized flux
    flux_err : array of float
        error of normalized flux
        
        
    Optional Inputs:
    ----------------
    tls_kwargs : None or dict, keywords:
        R_star : float
            radius of the star (e.g. median)
            default 1 R_sun (from TLS)
        R_star_min : float
            minimum radius of the star (e.g. 1st percentile)
            default 0.13 R_sun (from TLS)
        R_star_max : float
            maximum radius of the star (e.g. 99th percentile)
            default 3.5 R_sun (from TLS)
        M_star : float
            mass of the star (e.g. median)
            default 1. M_sun (from TLS)
        M_star_min : float
            minimum mass of the star (e.g. 1st percentile)
            default 0.1 M_sun (from TLS)
        M_star_max : float
            maximum mass of the star (e.g. 99th percentile)
            default 1. M_sun (from TLS)    
        u : list
            quadratic limb darkening parameters
            default [0.4804, 0.1867]
        ...
            
    SNR_threshold : float
        the SNR threshold at which to stop the TLS search
        
    known_transits : None or dict
        if dict and one transit is already known: 
            known_transits = {'period':[1.3], 'duration':[2.1], 'epoch':[245800.0]}
        if dict and multiple transits are already known: 
            known_transits = {'name':['b','c'], 'period':[1.3, 21.0], 'duration':[2.1, 4.1], 'epoch':[245800.0, 245801.0]}
        'period' is the period of the transit
        'duration' must be the total duration, i.e. from first ingress point to last egress point, in days
        'epoch' is the epoch of the transit
        
    options : None or dict, keywords:
        show_plot : bool
            show a plot of each phase-folded transit candidate and TLS model in the terminal 
            default is False
        save_plot : bool
            save a plot of each phase-folded transit candidate and TLS model into outdir
            default is False
        outdir : string
            if None, use the current working directory
            default is ""
        
    Returns:
    -------
    List of all TLS results
    '''

    #::: seed
    np.random.seed(42)

    #::: handle inputs
    if flux_err is None:
        ind = np.where(~np.isnan(time * flux))[0]
        time = time[ind]
        flux = flux[ind]
    else:
        ind = np.where(~np.isnan(time * flux * flux_err))[0]
        time = time[ind]
        flux = flux[ind]
        flux_err = flux_err[ind]

    time_input = 1. * time
    flux_input = 1. * flux  #for plotting

    if wotan_kwargs is None:
        detrend = False
    else:
        detrend = True

        if 'slide_clip' not in wotan_kwargs: wotan_kwargs['slide_clip'] = {}
        if 'window_length' not in wotan_kwargs['slide_clip']:
            wotan_kwargs['slide_clip']['window_length'] = 1.
        if 'low' not in wotan_kwargs['slide_clip']:
            wotan_kwargs['slide_clip']['low'] = 20.
        if 'high' not in wotan_kwargs['slide_clip']:
            wotan_kwargs['slide_clip']['high'] = 3.

        if 'flatten' not in wotan_kwargs: wotan_kwargs['flatten'] = {}
        if 'method' not in wotan_kwargs['flatten']:
            wotan_kwargs['flatten']['method'] = 'biweight'
        if 'window_length' not in wotan_kwargs['flatten']:
            wotan_kwargs['flatten']['window_length'] = 1.
        #the rest is filled automatically by Wotan

    if tls_kwargs is None: tls_kwargs = {}
    if 'show_progress_bar' not in tls_kwargs:
        tls_kwargs['show_progress_bar'] = False
    if 'SNR_threshold' not in tls_kwargs: tls_kwargs['SNR_threshold'] = 5.
    if 'SDE_threshold' not in tls_kwargs: tls_kwargs['SDE_threshold'] = 5.
    if 'FAP_threshold' not in tls_kwargs: tls_kwargs['FAP_threshold'] = 0.05
    tls_kwargs_original = {
        key: tls_kwargs[key]
        for key in tls_kwargs.keys()
        if key not in ['SNR_threshold', 'SDE_threshold', 'FAP_threshold']
    }  #for the original tls
    #the rest is filled automatically by TLS

    if options is None: options = {}
    if 'show_plot' not in options: options['show_plot'] = False
    if 'save_plot' not in options: options['save_plot'] = False
    if 'outdir' not in options: options['outdir'] = ''

    #::: init
    SNR = 1e12
    SDE = 1e12
    FAP = 0
    FOUND_SIGNAL = False
    results_all = []
    if len(options['outdir']) > 0 and not os.path.exists(options['outdir']):
        os.makedirs(options['outdir'])

    #::: logprint
    with open(os.path.join(options['outdir'], 'logfile.log'), 'w') as f:
        f.write('TLS search, UTC ' +
                datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '\n')
    logprint('\nWotan kwargs:', options=options)
    logpprint(wotan_kwargs, options=options)
    logprint('\nTLS kwargs:', options=options)
    logpprint(tls_kwargs, options=options)
    logprint('\nOptions:', options=options)
    logpprint(options, options=options)

    #::: apply a mask (if wished so)
    if known_transits is not None:
        for period, duration, T0 in zip(known_transits['period'],
                                        known_transits['duration'],
                                        known_transits['epoch']):
            time, flux, flux_err = mask(time, flux, flux_err, period, duration,
                                        T0)

    #::: global sigma clipping
    flux = sigma_clip(flux, sigma_upper=3, sigma_lower=20)

    #::: detrend (if wished so)
    if detrend:
        flux = slide_clip(time, flux, **wotan_kwargs['slide_clip'])
        flux, trend = flatten(time,
                              flux,
                              return_trend=True,
                              **wotan_kwargs['flatten'])

        if options['show_plot'] or options['save_plot']:
            fig, axes = plt.subplots(2, 1, figsize=(40, 8))
            axes[0].plot(time, flux_input, 'b.', rasterized=True)
            axes[0].plot(time, trend, 'r-', lw=2)
            axes[0].set(ylabel='Flux (input)', xticklabels=[])
            axes[1].plot(time, flux, 'b.', rasterized=True)
            axes[1].set(ylabel='Flux (detrended)', xlabel='Time (BJD)')
            if options['save_plot']:
                fig.savefig(os.path.join(
                    options['outdir'],
                    'flux_' + wotan_kwargs['flatten']['method'] + '.pdf'),
                            bbox_inches='tight')
            if options['show_plot']:
                plt.show(fig)
            else:
                plt.close(fig)

        X = np.column_stack((time, flux, flux_err, trend))
        np.savetxt(os.path.join(
            options['outdir'],
            'flux_' + wotan_kwargs['flatten']['method'] + '.csv'),
                   X,
                   delimiter=',',
                   header='time,flux_detrended,flux_err,trend')

        time_detrended = 1. * time
        flux_detrended = 1. * flux  #for plotting

    #::: search for the rest
    i = 0
    ind_trs = []
    while ((SNR >= tls_kwargs['SNR_threshold'])
           and (SDE >= tls_kwargs['SDE_threshold'])
           and (FAP <= tls_kwargs['FAP_threshold'])
           and not FOUND_SIGNAL):

        model = tls(time, flux, flux_err)
        results = model.power(**tls_kwargs_original)

        if (results.snr >= tls_kwargs['SNR_threshold']) and (
                results.SDE >= tls_kwargs['SDE_threshold']) and (
                    results.FAP <= tls_kwargs['FAP_threshold']):

            #::: calculate the correct_duration, as TLS sometimes returns unreasonable durations
            ind_tr_phase = np.where(results['model_folded_model'] < 1.)[0]
            correct_duration = results['period'] * (
                results['model_folded_phase'][ind_tr_phase[-1]] -
                results['model_folded_phase'][ind_tr_phase[0]])

            #::: mark transit
            ind_tr, ind_out = index_transits(time_input, results['T0'],
                                             results['period'],
                                             correct_duration)
            ind_trs.append(ind_tr)

            #::: mask out detected transits and append results
            time1, flux1 = time, flux  #for plotting
            time, flux, flux_err = mask(time, flux, flux_err, results.period,
                                        1.5 * correct_duration, results.T0)
            results_all.append(results)

            #::: write TLS stats to file
            with open(
                    os.path.join(options['outdir'],
                                 'tls_signal_' + str(i) + '.txt'),
                    'wt') as out:
                pprint(results, stream=out)

            #::: individual TLS plots
            if options['show_plot'] or options['save_plot']:
                fig = plt.figure(figsize=(20, 8), tight_layout=True)
                gs = fig.add_gridspec(2, 3)

                ax = fig.add_subplot(gs[0, :])
                ax.plot(time1, flux1, 'b.', rasterized=True)
                ax.plot(results['model_lightcurve_time'],
                        results['model_lightcurve_model'],
                        'r-',
                        lw=3)
                ax.set(xlabel='Time (BJD)', ylabel='Flux')

                ax = fig.add_subplot(gs[1, 0])
                ax.plot(results['folded_phase'],
                        results['folded_y'],
                        'b.',
                        rasterized=True)
                ax.plot(results['model_folded_phase'],
                        results['model_folded_model'],
                        'r-',
                        lw=3)
                ax.set(xlabel='Phase', ylabel='Flux')

                ax = fig.add_subplot(gs[1, 1])
                ax.plot(
                    (results['folded_phase'] - 0.5) * results['period'] * 24,
                    results['folded_y'],
                    'b.',
                    rasterized=True)
                ax.plot((results['model_folded_phase'] - 0.5) *
                        results['period'] * 24,
                        results['model_folded_model'],
                        'r-',
                        lw=3)
                ax.set(xlim=[
                    -1.5 * correct_duration * 24, +1.5 * correct_duration * 24
                ],
                       xlabel='Time (h)',
                       yticks=[])

                ax = fig.add_subplot(gs[1, 2])
                ax.text(.02,
                        0.95,
                        'P = ' +
                        np.format_float_positional(results['period'], 4) +
                        ' d',
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(
                    .02,
                    0.85,
                    'Depth = ' +
                    np.format_float_positional(1e3 *
                                               (1. - results['depth']), 4) +
                    ' ppt',
                    ha='left',
                    va='center',
                    transform=ax.transAxes)
                ax.text(.02,
                        0.75,
                        'Duration = ' +
                        np.format_float_positional(24 * correct_duration, 4) +
                        ' h',
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.65,
                        'T_0 = ' +
                        np.format_float_positional(results['T0'], 4) + ' d',
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.55,
                        'SNR = ' +
                        np.format_float_positional(results['snr'], 4),
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.45,
                        'SDE = ' +
                        np.format_float_positional(results['SDE'], 4),
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.text(.02,
                        0.35,
                        'FAP = ' +
                        np.format_float_scientific(results['FAP'], 4),
                        ha='left',
                        va='center',
                        transform=ax.transAxes)
                ax.set_axis_off()
                if options['save_plot']:
                    fig.savefig(os.path.join(options['outdir'],
                                             'tls_signal_' + str(i) + '.pdf'),
                                bbox_inches='tight')
                if options['show_plot']:
                    plt.show(fig)
                else:
                    plt.close(fig)

        SNR = results.snr
        SDE = results.SDE
        FAP = results.FAP
        i += 1

    #::: full lightcurve plot
    if options['show_plot'] or options['save_plot']:

        if detrend:
            fig, axes = plt.subplots(2, 1, figsize=(40, 8), tight_layout=True)
            ax = axes[0]
            ax.plot(time_input,
                    flux_input,
                    'k.',
                    color='grey',
                    rasterized=True)
            ax.plot(time_input, trend, 'r-', lw=2)
            for number, ind_tr in enumerate(ind_trs):
                ax.plot(time_input[ind_tr],
                        flux_input[ind_tr],
                        marker='.',
                        linestyle='none',
                        label='signal ' + str(number))
            ax.set(ylabel='Flux (input)', xticklabels=[])
            ax.legend()

            ax = axes[1]
            ax.plot(time_detrended,
                    flux_detrended,
                    'k.',
                    color='grey',
                    rasterized=True)
            for number, ind_tr in enumerate(ind_trs):
                ax.plot(time_detrended[ind_tr],
                        flux_detrended[ind_tr],
                        marker='.',
                        linestyle='none',
                        label='signal ' + str(number))
            ax.set(ylabel='Flux (detrended)', xlabel='Time (BJD)')
            ax.legend()

        else:
            fig, ax = plt.subplots(1, 1, figsize=(40, 4), tight_layout=True)
            ax.plot(time_input,
                    flux_input,
                    'k.',
                    color='grey',
                    rasterized=True)
            ax.set(ylabel='Flux (input)', xlabel='Time (BJD)')
            for number, ind_tr in enumerate(ind_trs):
                ax.plot(time_input[ind_tr],
                        flux_input[ind_tr],
                        marker='.',
                        linestyle='none',
                        label='signal ' + str(number))
            ax.legend()

        if options['save_plot']:
            fig.savefig(os.path.join(options['outdir'], 'tls_signal_all.pdf'),
                        bbox_inches='tight')
        if options['show_plot']:
            plt.show(fig)
        else:
            plt.close(fig)

    return results_all
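A minimal usage sketch for the routine above. Its name and full signature fall outside this excerpt, so `tls_search(time, flux, flux_err, ...)` is assumed here from the docstring and body, and the light curve is synthetic:

import numpy as np

# hypothetical call; `tls_search` is the assumed name of the function above
rng = np.random.default_rng(42)
time = np.linspace(0., 27., 20000)                  # days
flux = 1. + 1e-4 * rng.standard_normal(time.size)   # normalized flux
flux_err = np.full_like(time, 1e-4)

results_all = tls_search(
    time, flux, flux_err,
    wotan_kwargs={'flatten': {'method': 'biweight', 'window_length': 1.}},
    tls_kwargs={'period_min': 0.5, 'period_max': 14., 'SNR_threshold': 5.},
    known_transits=None,
    options={'save_plot': True, 'outdir': 'tls_results'})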
Example #39
0
def sens_func(wav,
              cnt,
              exp,
              seeing,
              sw,
              airmass,
              extfile,
              stdfile,
              wave_range,
              order,
              show=1,
              save=0):
    '''
    Derive the sensitivity function from an observed standard-star spectrum.
    '''

    # Speed of light
    c = 2.99792458e+10  # [cm/s]

    inverse = False
    if wav[1] < wav[0]:
        wav = wav[::-1]
        cnt = cnt[::-1]
        inverse = True

    # Slit loss correction
    # --------------------
    print(
        F'[Sensitivity function] Slit loss correction: FWHM = {seeing:.2f} [px], Slit Width = {sw:.2f} [px]'
    )
    sl = get_slit_loss(fwhm=seeing, sw=sw)
    cnt = cnt / (1 - sl)

    # Atmospheric extinction correction
    # ---------------------------------
    print(
        '[Sensitivity function] Extinction correction: Load extinction coefficients'
    )
    wav_ext, ext = np.loadtxt(extfile).T
    print('[Sensitivity function] Extinction correction: Dereddening')
    ext = interp1d(wav_ext,
                   ext,
                   kind='quadratic',
                   bounds_error=False,
                   fill_value='extrapolate')(wav)
    ext_corr_factor = 10**(0.4 * airmass * ext)
    cnt = cnt * ext_corr_factor

    # Convert to counts/A/s
    # ---------------------
    # 1. Bandpass
    dwav = np.abs(np.diff(wav))
    dwav = np.hstack([dwav[0], dwav, dwav[-1]])
    dwav = (dwav[:-1] + dwav[1:]) / 2
    # 2. Convert
    cnt = cnt / dwav / exp

    # Sensitivity function
    # --------------------
    if not os.path.exists(os.path.join(os.getcwd(), stdfile)):
        if not os.path.exists(
                os.path.join(
                    os.path.split(os.path.realpath(__file__))[0], 'onedstds/',
                    stdfile)):
            raise FileNotFoundError('No standard file found.')
        else:
            stdfile = os.path.join(
                os.path.split(os.path.realpath(__file__))[0], 'onedstds/',
                stdfile)
    else:
        stdfile = os.path.join(os.getcwd(), stdfile)
    # 1. Load standard spectrum
    print('[Sensitivity function] Load archived standard spectrum')
    if os.path.split(os.path.split(stdfile)[0])[1] == 'calspec':
        tab = Table.read(stdfile)
        wav_mag = tab['WAVELENGTH'].data
        flx_mod = tab['FLUX'].data
        bp = tab['FWHM'].data
    else:
        stdspec = np.loadtxt(stdfile, skiprows=1).T
        if stdspec.shape[0] == 3:
            wav_mag, mag, bp = stdspec
        elif stdspec.shape[0] == 2:
            wav_mag, mag = stdspec
            dwav_mag = np.abs(np.diff(wav_mag))
            dwav_mag = np.hstack([dwav_mag[0], dwav_mag, dwav_mag[-1]])
            bp = (dwav_mag[:-1] + dwav_mag[1:]) / 2
        flx_mod = 10**(-0.4 *
                       mag) * 3631e-23 * c / wav_mag**2 * 1e8  # [erg/cm2/s/A]
    # 2. Comparison
    print('[Sensitivity function] Comparison')
    flx_obs = np.zeros(flx_mod.shape[0])
    bins = np.vstack([wav_mag - bp / 2, wav_mag + bp / 2]).T
    for i, bin in enumerate(bins):
        idx = (bin[0] < wav) & (wav < bin[1])
        edges = interp1d(wav,
                         cnt,
                         'linear',
                         bounds_error=False,
                         fill_value=np.nan)(bin)
        flx_obs[i] = np.trapz(np.hstack([edges[0], cnt[idx], edges[1]]),
                              x=np.hstack([bin[0], wav[idx], bin[1]
                                           ])) / (bin[1] - bin[0])
    sen = flx_obs / flx_mod
    # 3. Filter NaN
    idx = np.isnan(sen)
    wav_sen = wav_mag[~idx]
    sen = 2.5 * np.log10(sen[~idx])

    print('[Sensitivity function] Fitting')
    mask = ~np.isnan(sen) & (wave_range[0] < wav_sen) & (wav_sen <
                                                         wave_range[1])
    for i in range(5):
        knots = np.r_[(wav_sen[mask][0], ) * (order + 1),
                      (wav_sen[mask][-1], ) * (order + 1)]
        spl = make_lsq_spline(wav_sen[mask], sen[mask], t=knots, k=order)
        mask = mask & ~sigma_clip(
            sen - spl(wav_sen), sigma=0.5, maxiters=1, masked=True).mask

    sen_fit = spl(wav)

    # Plot
    print('[Sensitivity function] Plot fitted sensitivity function', end='')
    fig, ax = plt.subplots(2, 1, figsize=(12, 12))
    # Sensitivity function
    ax[0].plot(wav_sen[mask], sen[mask], '+', color='black', ms=10)
    ax[0].plot(wav_sen[~mask], sen[~mask], '+', color='lightgrey', ms=10)
    ax[0].plot(wav, sen_fit, '-', c='red', lw=2)
    # Settings
    ax[0].grid(axis='both', color='0.95', zorder=0)
    ax[0].set_xlim(wav.min(), wav.max())
    ax[0].tick_params(which='major',
                      direction='in',
                      top=True,
                      right=True,
                      length=7,
                      width=1.5,
                      labelsize=18)
    ax[0].set_ylabel('Sensitivity Function', fontsize=22)
    ax[0].set_title('Sensitivity Function', fontsize=24)

    ax[1].plot(wav_sen[mask], (sen - spl(wav_sen))[mask],
               '+',
               color='black',
               ms=10,
               zorder=1)
    ax[1].plot(wav_sen[~mask], (sen - spl(wav_sen))[~mask],
               '+',
               color='lightgrey',
               ms=10,
               zorder=0)
    ax[1].axhline(y=0, c='red', ls='--', zorder=2)
    # Settings
    ax[1].grid(axis='both', color='0.95')
    ax[1].set_xlim(wav.min(), wav.max())
    ax[1].set_ylim((sen - spl(wav_sen))[mask].min() * 0.8,
                   (sen - spl(wav_sen))[mask].max() * 1.2)
    ax[1].tick_params(which='major',
                      direction='in',
                      top=True,
                      right=True,
                      length=7,
                      width=1.5,
                      labelsize=18)
    ax[1].set_xlabel('Wavelength [$\\mathrm{\\AA}$]', fontsize=22)
    ax[1].set_ylabel('Residuals', fontsize=22)
    fig.align_ylabels()

    if save:
        if not os.path.exists('figs'): os.makedirs('figs')
        print(' to `figs/Sensitivity_Function.png`')
        plt.savefig('figs/Sensitivity_Function.png', dpi=144)
    else:
        print('')

    if show:
        print('[Sensitivity function] Show plot')
        plt.show()
    plt.close()

    if inverse:
        sen_fit = sen_fit[::-1]
        wav = wav[::-1]
        cnt = cnt[::-1]

    # Write to file
    if not os.path.exists('cal'): os.makedirs('cal')
    file_path = 'cal/SensFunc.dat'
    print(
        F'[Sensitivity function] Write sensitivity function to `{file_path}`')
    np.savetxt(file_path, np.vstack([wav, sen_fit]).T, fmt='%15.8e')

    return sen_fit
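The fitting step above (like `get_zp_shift` in Example #45 below) uses an iterate-fit-and-clip pattern: fit a least-squares spline, sigma-clip the residuals, and refit on the surviving points. A self-contained sketch of that loop on synthetic data, with an illustrative 3-sigma threshold:

import numpy as np
from astropy.stats import sigma_clip
from scipy.interpolate import make_lsq_spline

rng = np.random.default_rng(0)
x = np.linspace(4000., 9000., 500)              # wavelength grid [A]
y = 1e-7 * (x - 6500.)**2 + rng.normal(0., 0.05, x.size)
y[::50] += 1.                                   # inject outliers

order = 3
mask = np.ones(x.size, dtype=bool)
for _ in range(5):
    # (order+1)-fold end knots and no interior knots -> a single polynomial piece
    knots = np.r_[(x[mask][0],) * (order + 1), (x[mask][-1],) * (order + 1)]
    spl = make_lsq_spline(x[mask], y[mask], t=knots, k=order)
    # reject points that deviate from the current fit, then refit
    mask &= ~sigma_clip(y - spl(x), sigma=3., maxiters=1, masked=True).mask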
Example #40
0
#%%
bias = combine(bias_list,       # ccdproc does not accept numpy.ndarray, but only python list.
               method='median',         # default is average so I specified median.
               unit='adu')              # unit is required: it's ADU in our case.
#print('bias', bias.data.dtype, bias.data.max(), bias.data.min(), bias )
bias.write(dir_name + f_name_bias, overwrite=True)

#%%
dark0 = combine(dark_list,       # ccdproc does not accept numpy.ndarray, but only python list.
               method='median',         # default is average so I specified median.
               unit='adu')   
dark0.write(dir_name + f_name_dark0, overwrite=True)
print('dark0', dark0.data.min(), dark0.data.max(), dark0)
dark = ccd_process(dark0, master_bias=bias) 
print('dark', dark.data.min(), dark.data.max(), dark.data)
dark_clip = sigma_clip(dark) 
print('dark_clip', dark_clip.data.min(), dark_clip.data.max(), dark_clip.data)
dark_clip.fill_value = np.median(dark_clip.data)
dark.data = dark_clip.filled() # .filled() replaces masked values with fill_value
print('dark', dark.data.dtype, dark.data.min(), dark.data.max(), dark.data)
#dark.write(dir_name+f_name_dark, overwrite =True)

#%%
for chl in ['']:
    
    flat_list = sorted(glob(os.path.join(dir_name, 'flat-'+chl+'*.fit')))

    flat0 = combine(flat_list,       # ccdproc does not accept numpy.ndarray, but only python list.
                   method='median',         # default is average so I specified median.
                   unit='adu')  
    flat0.write(dir_name + f_name_flat0[:-4] + '_' + chl + '.fit', overwrite=True)
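For reference, the clip-and-fill idiom applied to the dark frame above, in isolation (synthetic pixel values, default clipping parameters):

import numpy as np
from astropy.stats import sigma_clip

dark_data = np.random.default_rng(1).normal(100., 5., (64, 64))
dark_data[10, 10] = 5000.                  # a hot pixel

clipped = sigma_clip(dark_data)            # masked array; outliers are masked
clipped.fill_value = np.median(dark_data)  # replace outliers with the frame median
cleaned = clipped.filled()                 # plain ndarray; masked entries -> fill_value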
Example #41
0
def find_resolution(multispec_fname,
                    initial_fwhm=.05,
                    usepercentile=True,
                    percentiles=[60, 80, 95],
                    Rguess=None,
                    full_output=False,
                    useclip=True,
                    findpeak=False,
                    makeplot=True):
    """
    """
    from .spectrum import Spectrum1D
    from astropy.stats import sigma_clip, biweight
    from ..robust_polyfit import gaussfit
    from ..utils import find_distribution_peak
    import time
    arcs = Spectrum1D.read(multispec_fname, flux_ext=4)
    line_centers = np.loadtxt(os.path.dirname(__file__) +
                              "/../data/linelists/thar_list",
                              usecols=0)

    start = time.time()

    alllinefits = []
    allRmed = []
    allRerr = []
    allwmid = []
    allR = []
    for i, arc in enumerate(arcs):
        linefits = []
        wave = arc.dispersion
        flux = arc.flux
        wmin, wmax = wave[0], wave[-1]
        wmid = (wmin + wmax) / 2.
        lcs = line_centers[(line_centers > wmin) & (line_centers < wmax)]
        for lc in lcs:
            fwhm = initial_fwhm
            # get subpiece of arc
            ii = (wave > lc - 5 * fwhm) & (wave < lc + 5 * fwhm)
            _x, _y = wave[ii], flux[ii]
            # guess amplitude, center, sigma
            p0 = [np.max(_y), lc, fwhm / 2.355]
            try:
                popt = gaussfit(_x, _y, p0)
            except Exception:
                pass
            else:
                if popt[0] > 0 and abs(popt[1] - lc) < .05:
                    linefits.append(popt)
        try:
            A, w, s = np.array(linefits).T
        except ValueError:
            print("This order did not have any good lines I guess")
            #allR.append(np.nan); allRmed.append(np.nan); allRerr.append(np.nan); allwmid.append(wmid)
            continue
        alllinefits.append(linefits)
        R = w / (s * 2.355)
        if useclip: R = sigma_clip(R)
        if findpeak:
            if Rguess is None: Rguess = np.nanmedian(R)
            try:
                Rmed = find_distribution_peak(R, Rguess)
            except (ValueError, RuntimeError):
                print("--Could not find peak for arc {:02}".format(i))
                print("--{}".format(sys.exc_info()))
                Rmed = np.median(R)
        elif usepercentile:
            assert len(percentiles) == 3
            Rlo, Rmed, Rhi = np.percentile(R, percentiles)
            #Rerr = max(Rhi-Rmed, Rmed-Rlo)
            Rerr = (Rmed - Rlo, Rhi - Rmed)
        else:
            Rmed = biweight.biweight_location(R)
            Rerr = biweight.biweight_scale(R)
        allR.append(R)
        allRmed.append(Rmed)
        allRerr.append(Rerr)
        allwmid.append(wmid)
    if usepercentile:
        allRerr = np.array(allRerr).T

    if makeplot:
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        ax.errorbar(allwmid, allRmed, yerr=allRerr, fmt='o')
        plt.show()

    if full_output:
        return allRmed, allRerr, allwmid, allR, arcs, alllinefits
    return allRmed, allRerr, allwmid
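The per-line resolving power computed above is R = lambda / FWHM, where FWHM = 2*sqrt(2*ln 2)*sigma ≈ 2.355*sigma for a Gaussian profile, hence the `w / (s * 2.355)` expression. A worked example with illustrative numbers:

# a line at 5500 A fitted with FWHM = 0.05 A
center = 5500.0                 # fitted line center [A]
fwhm = 0.05                     # line FWHM [A]
sigma = fwhm / 2.355            # equivalent Gaussian sigma
R = center / (sigma * 2.355)    # = center / fwhm = 110000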
Example #42
0
def avsigc(indata, sigma):
    a = stats.sigma_clip(indata, sigma=sigma, iters=3)
    mn = np.median(a[~a.mask])  # '~' (not '-') inverts the boolean mask
    lena = np.ma.MaskedArray.count(a)
    return mn, lena
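An illustrative call on synthetic data; note that `iters=3` is the pre-3.1 astropy keyword (newer versions spell it `maxiters`):

import numpy as np

# two gross outliers appended to ~N(10, 1) samples; both should be clipped
data = np.append(np.random.default_rng(2).normal(10., 1., 1000), [50., -40.])
mn, lena = avsigc(data, sigma=3)
print(mn, lena)   # median ~10 computed from the ~1000 surviving points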
Example #43
0
def compare_results(file_list, parameter_change_list, bary_starname, orbital_parameters_mult, objects, servaldir, serval_T0_offset):
    """
    Compares result files to find the best rv scatter around literature fit returns change in parameter that yielded best results
    
    file_list: list of `str`
        list containing the file paths of the files to be compared
    parameter_change_list: list of `int`
        list containing the parameter exponent shifts used to create the files in file_list
    orbital_parameters_mult : list of `float`
        orbital_parameters = [K, P, e, omega, T0]
        parameters of the keplerian fit to be used as "true" baseline
    """
    def keplarian_rv_mult(t):
        #Calculates the Keplerian RV for multiple supplied sets of planet parameters
        total_rv = 0
        for parameters in orbital_parameters_mult:
            total_rv = total_rv + rv.radial_velocity_M0(t , parameters[0], parameters[1], parameters[2], parameters[3], parameters[4], parameters[5])
        return total_rv
    
    def fit_func(t, T0_offset):
        return keplarian_rv_mult(t + T0_offset)
    
    sigma_list = np.zeros(len(file_list)) + 100 # 100 is a fudge factor
    plots = True
    if plots:
        rec_loop_directory, key_name = os.path.split(os.path.split(file_list[0])[0]) # HACK: go up 2 directories to the loop directory
        plot_directory = rec_loop_directory + "/compare_plots"
        os.makedirs(plot_directory, exist_ok = True)
        
        pp = PdfPages(plot_directory + "/" + key_name + ".pdf")
        fig = plt.figure(figsize=(15, 9), dpi=200)
        mpl.rc('font', size=16)
        plt.clf()
        fig.clf()
        ax1 = plt.gca()
    
    for f, fil in enumerate(file_list):
        res = Results_ws(wobble_file = fil
                 , serval_dir = servaldir
                 , carmenes_object_ID = objects[0][1]
                 , bary_starname = bary_starname
                 , load_bary = False
                 , archive = False)
        res.apply_corrections()
        w_RVs_barycorr = res.w_RVs_barycorr
        w_dates = res.w_dates
        w_RVs_er = res.w_RVs_er
        #skip fitting test RV curve to wobble data. Just use SERVAL T0 offset
        T0_offset = serval_T0_offset
        sigma_wob = np.nanstd(sigma_clip(
            w_RVs_barycorr - np.nanmean(w_RVs_barycorr) - fit_func(w_dates, T0_offset),
            sigma=5))
        sigma_list[f] = sigma_wob

        sigma_wob_noclip = np.nanstd(
            w_RVs_barycorr - np.nanmean(w_RVs_barycorr) - fit_func(w_dates, T0_offset))

        if plots:
            T0_offset_s = T0_offset
            ser_avcn = res.ser_avcn
            sigma_ser = np.nanstd(sigma_clip(
                ser_avcn[:, 1] - np.nanmean(ser_avcn[:, 1]) - fit_func(ser_avcn[:, 0], T0_offset_s),
                sigma=5))

            sigma_ser_noclip = np.nanstd(
                ser_avcn[:, 1] - np.nanmean(ser_avcn[:, 1]) - fit_func(ser_avcn[:, 0], T0_offset_s))

            sigma_wob_Soffset = np.nanstd(sigma_clip(
                w_RVs_barycorr - np.nanmean(w_RVs_barycorr) - fit_func(w_dates, T0_offset_s),
                sigma=5))

            sigma_wob_noclip_Soffset = np.nanstd(
                w_RVs_barycorr - np.nanmean(w_RVs_barycorr) - fit_func(w_dates, T0_offset_s))
            
            xlst = np.linspace(w_dates[0], w_dates[0] + orbital_parameters_mult[0][1]*0.99999, num=100)
            ylst = [fit_func(t, T0_offset) for t in xlst]
            #sort by xlst
            pltlst = [[xlst[j],ylst[j]] for j in range(len(xlst))]
            def mod_sort(elem):
                return elem[0] % orbital_parameters_mult[0][1]
            pltlst = sorted(pltlst, key = mod_sort)
            pltlst = np.asarray(pltlst)
            pltlst = [pltlst[:,0],pltlst[:,1]]
            
            T0_source = "SERVAL "
            
            ax1.plot(pltlst[0] % orbital_parameters_mult[0][1], pltlst[1], "r-", label = "literature orbit (" + T0_source + "T0_offset)")
            ax1.errorbar((w_dates) % orbital_parameters_mult[0][1], (w_RVs_barycorr-np.nanmean(w_RVs_barycorr)), yerr = w_RVs_er,fmt = "x", label="Wobble_Corr, clipped_sigma = {0:.3f}, noclip = {1:.3f} ".format(sigma_wob, sigma_wob_noclip))
            ax1.errorbar((ser_avcn[:,0]) % orbital_parameters_mult[0][1], ser_avcn[:,1] - np.nanmean(ser_avcn[:,1]),yerr = ser_avcn[:,2] ,fmt = "x", label= "SERVAL_Corr, clipped_sigma = {0:.3f}, noclip = {1:.3f}".format(sigma_ser, sigma_ser_noclip), color = "C2")
            ax1.plot([], [], ' ', label="Wobble_Corr_SERVAL_fit, clipped_sigma = {0:.3f}, noclip = {1:.3f} ".format(sigma_wob_Soffset, sigma_wob_noclip_Soffset))
            ax1.set_ylabel("RVs [m/s]")
            ax1.set_xlabel('jd')
            # add the parameter change to the title
            title_pre = os.path.split(os.path.split(fil)[0])[1]
            plt.title(title_pre + ", Phased ("+str(orbital_parameters_mult[0][1])+"d) RVs for "+objects[0][0]+" ("+objects[0][2]+") "+" - "+objects[0][1]+";")
            plt.grid(True)
            plt.tight_layout()
            plt.legend(shadow=True)
            plt.savefig(pp, format='pdf')
            plt.clf()
            fig.clf()
            ax1 = plt.gca()
        
        
    
    if plots:# include some nice progress plots. TODO make it not crudely placed inside this function?
        plt.close(fig)
        pp.close()
    
    best_index = np.argmin(sigma_list)
    return parameter_change_list[best_index]
Example #44
0
    def correct(self,
                time,
                flux,
                centroid_col,
                centroid_row,
                polyorder=5,
                niters=3,
                bins=15,
                windows=1,
                sigma_1=3.,
                sigma_2=5.,
                restore_trend=False):
        """Returns a systematics-corrected LightCurve.

        Note that it is assumed that time and flux do not contain NaNs.

        Parameters
        ----------
        time : array-like
            Time measurements
        flux : array-like
            Data flux for every time point
        centroid_col, centroid_row : array-like, array-like
            Centroid column and row coordinates as a function of time
        polyorder : int
            Degree of the polynomial which will be used to fit one
            centroid as a function of the other.
        niters : int
            Number of iterations of the aforementioned algorithm.
        bins : int
            Number of bins to be used in step (6) to create the
            piece-wise interpolation of arclength vs flux correction.
        windows : int
            Number of windows to subdivide the data.  The SFF algorithm
            is run independently in each window.
        sigma_1, sigma_2 : float, float
            Sigma values which will be used to reject outliers
            in steps (6) and (2), respectively.
        restore_trend : bool
            If `True`, the long-term trend will be added back into the
            lightcurve.

        Returns
        -------
        corrected_lightcurve : LightCurve object
            Returns a corrected lightcurve object.
        """
        timecopy = time
        time = np.array_split(time, windows)
        flux = np.array_split(flux, windows)
        centroid_col = np.array_split(centroid_col, windows)
        centroid_row = np.array_split(centroid_row, windows)

        flux_hat = np.array([])
        # The SFF algorithm is going to be run on each window independently

        for i in tqdm(range(windows)):
            # To make it easier (and more numerically stable) to fit a
            # characteristic polynomial that describes the spacecraft motion,
            # we rotate the centroids to a new coordinate frame in which
            # the dominant direction of motion is aligned with the x-axis.
            self.rot_col, self.rot_row = self.rotate_centroids(
                centroid_col[i], centroid_row[i])
            # Next, we fit the motion polynomial after removing outliers
            self.outlier_cent = sigma_clip(data=self.rot_col,
                                           sigma=sigma_2).mask
            with warnings.catch_warnings():
                # ignore warning messages related to polyfit being poorly conditioned
                warnings.simplefilter("ignore", category=np.RankWarning)
                coeffs = np.polyfit(self.rot_row[~self.outlier_cent],
                                    self.rot_col[~self.outlier_cent],
                                    polyorder)

            self.poly = np.poly1d(coeffs)
            self.polyprime = np.poly1d(coeffs).deriv()

            # Compute the arclength s.  It is the length of the polynomial
            # (fitted above) that describes the typical motion.
            x = np.linspace(np.min(self.rot_row[~self.outlier_cent]),
                            np.max(self.rot_row[~self.outlier_cent]), 10000)
            self.s = np.array(
                [self.arclength(x1=xp, x=x) for xp in self.rot_row])

            # Next, we find and apply the correction iteratively
            self.trend = np.ones(len(time[i]))
            for n in range(niters):
                # First, fit a spline to capture the long-term variation
                # We don't want to fit the long-term trend because we know
                # that the K2 motion noise is a high-frequency effect.
                self.bspline = self.fit_bspline(time[i], flux[i])
                # Remove the long-term variation by dividing the flux by the spline
                iter_trend = self.bspline(time[i] - time[i][0])
                self.normflux = flux[i] / iter_trend
                self.trend *= iter_trend
                # Bin and interpolate normalized flux to capture the dependency
                # of the flux as a function of arclength
                self.interp = self.bin_and_interpolate(self.s,
                                                       self.normflux,
                                                       bins,
                                                       sigma=sigma_1)
                # Correct the raw flux
                corrected_flux = self.normflux / self.interp(self.s)
                flux[i] = corrected_flux

            if restore_trend:
                flux[i] *= self.trend
            flux_hat = np.append(flux_hat, flux[i])

        return LightCurve(time=timecopy, flux=flux_hat)
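A hypothetical call, assuming this `correct` method lives on an SFF-style corrector class as in early lightkurve releases (the class definition is not part of this excerpt); `lc` stands for a K2 light curve whose NaNs were removed beforehand:

corrector = SFFCorrector()   # assumed class name, not shown above
corrected_lc = corrector.correct(time=lc.time,
                                 flux=lc.flux,
                                 centroid_col=lc.centroid_col,
                                 centroid_row=lc.centroid_row,
                                 windows=20,
                                 polyorder=5,
                                 restore_trend=True)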
Example #45
0
    def get_zp_shift( self, order, show, save ):
        '''
        A method to get zeropoint shift along dispersion axis.

        Parameters
        ----------
        order : int
            Order used in polyfit.
        show : bool
            If `True`, the plot will be shown.
        save : bool
            If `True`, the plot will be written to file.

        Returns
        -------
        self.shift : array_like
            Fitted zeropoint shift.
        '''

        # Derive zeropoint shift
        # ----------------------
        shift = np.array([])
        for k, img in enumerate( self.imgs ):

            if self.slit_along == 'col': data = img * 1
            if self.slit_along == 'row': data = img.T

            # Find peaks
            # ----------
            print( F'[Zeropoint correction] Searching for peaks in image {k+1}/{self.num}' )
            peaks, properties = find_peaks( data.mean( axis = 0 ) / data.mean( axis = 0 ).max(), 
                                            height = ( 0.3, 0.8 ), 
                                            distance = data.shape[1] // 10,
                                            width = 0 )
            # Print peak info.
            tab = PrettyTable( hrules = HEADER, vrules = NONE )
            tab.field_names = [ 'CENTER', 'HEIGHT', 'WIDTH' ]
            for i, peak in enumerate( peaks ):
                tab.add_row([ peak, round( properties['peak_heights'][i], 2 ), int( properties['widths'][i] ) ])
            print( '\n' + tab.get_string() + '\n' )

            # Gaussian fitting
            x = np.arange( data.shape[1] )
            mu = np.zeros([ data.shape[0], len( peaks ) ])
            for i in range( data.shape[0] ):
                print( F'\r[Zeropoint correction] Fitting peak centers in row {i+1}/{data.shape[0]} of image {k+1}/{self.imgs.shape[0]}', end = '', flush = True )
                for j, peak in enumerate( peaks ):
                    idxmin, idxmax = peak - int( properties['widths'][j] ), peak + int( properties['widths'][j] )
                    popt, pcov = curve_fit( Gaussian, 
                                            x[idxmin:idxmax], 
                                            data[i, idxmin:idxmax], 
                                            bounds = ( [data[i, idxmin:idxmax].max()*0.5, x[idxmin:idxmax].min(), 0 ], 
                                                       [data[i, idxmin:idxmax].max()*1.5, x[idxmin:idxmax].max(), x[idxmin:idxmax].shape[0] ] ) )
                    mu[i, j] = popt[1]
            # Convert to relative shift
            shift = np.hstack([ shift.reshape( mu.shape[0], -1 ), ( mu - mu[mu.shape[0]//2] ) ])
            print( '' )
        # Mean
        shift_mean = shift.mean( axis = 1 )

        # Polyfit to shift curve
        # ----------------------
        print( F'[Zeropoint correction] Fitting zeropoint shift iteratively (order = {order}, 5 iterations, nsigma = 1)' )
        y = np.arange( shift_mean.shape[0] ) + 1
        mask = np.ones( shift_mean.shape[0], dtype = bool )
        for k in range( 5 ):
            knots = np.r_[ ( y[mask][0], ) * ( order + 1 ), ( y[mask][-1], ) * ( order + 1 ) ]
            spl = make_lsq_spline( y[mask], shift_mean[mask], t = knots, k = order )
            mask = mask & ~sigma_clip( shift_mean - spl( y ), sigma = 1, maxiters = 1, masked = True ).mask
#             p = np.poly1d( np.polyfit( y[mask], shift_mean[mask], order ) )
#             shift_fit = p( y )
#             mask = mask & ( ~sigma_clip( shift_mean - shift_fit, sigma = 1, maxiters = 1, masked = True ).mask )
        self.shift = spl( y )

        # Write to file
        # -------------
        if not os.path.exists( 'bak' ): os.makedirs( 'bak' )
        np.savetxt( 'bak/zp_shift.dat', self.shift, fmt = '%15.8e' )

        # Plot
        # ----
        fig, ax = plt.subplots( 2, 1, figsize = ( 10, 8 ) )
        fig.subplots_adjust( hspace = 0 )
        
        # Shifts
        for i in range( shift.shape[1] ):
            ax[0].plot( y, shift[:, i], 'r+' )
        ax[0].plot( y[~mask], shift_mean[~mask], '+', c = 'grey' )
        ax[0].plot( y[mask], shift_mean[mask], 'k+' )
        # Fitted shifts
        ax[0].plot( self.shift, '-', c = 'yellow', lw = 2 )
        # Settings
        ax[0].set_xlim( y.min(), y.max() )
        ax[0].tick_params( which = 'major', direction = 'in', top = True, right = True, length = 7, width = 1.5, labelsize = 18 )
        ax[0].set_xticklabels([])
        ax[0].set_ylabel( 'Displacement [px]', fontsize = 22 )
        ax[0].set_title( 'Zeropoint Shift Curve Fitting', fontsize = 24 )
        
        # Residuals
        ax[1].plot( y[mask],  shift_mean[mask]  - self.shift[mask], 'k+' )
        ax[1].plot( y[~mask], shift_mean[~mask] - self.shift[~mask], '+', c = 'grey' )
        # Settings
        ax[1].axhline( y = 0, ls = '--', c = 'yellow', lw = 2 )
        ax[1].set_xlim( y.min(), y.max() )
        ax[1].tick_params( which = 'major', direction = 'in', top = True, right = True, length = 7, width = 1.5, labelsize = 18 )
        ax[1].set_xlabel( 'Slit', fontsize = 22 )
        ax[1].set_ylabel( 'Residuals [px]', fontsize = 22 )
        fig.align_ylabels()
        
        if save:
            fig_path = 'figs'
            if not os.path.exists( fig_path ): os.makedirs( fig_path )
            print( F'[Zeropoint correction] Plot zeropoint shift curve fitting to `{ os.path.join( fig_path, "Zeropoint_shift_curve_fitting.png" ) }`' )
            plt.savefig( os.path.join( fig_path, 'Zeropoint_shift_curve_fitting.png' ), dpi = 144 )
        if show:
            print( '[Zeropoint correction] Show plot' )
            plt.show()
        plt.close()

        return deepcopy( self.shift )
Example #46
0
def make_fov_image(fov, pngfn=None, **kwargs):
    stretch = kwargs.get('stretch', 'linear')
    interval = kwargs.get('interval', 'zscale')
    imrange = kwargs.get('imrange')
    contrast = kwargs.get('contrast', 0.25)
    ccdplotorder = ['CCD2', 'CCD4', 'CCD1', 'CCD3']
    if interval == 'rms':
        try:
            losig, hisig = imrange
        except (TypeError, ValueError):
            losig, hisig = (2.5, 5.0)
    #
    cmap = kwargs.get('cmap', 'viridis')
    cmap = plt.get_cmap(cmap)
    cmap.set_bad('w', 1.0)
    w = 0.4575
    h = 0.455
    rc('text', usetex=False)
    fig = plt.figure(figsize=(6, 6.5))
    cax = fig.add_axes([0.1, 0.04, 0.8, 0.01])
    ims = [fov[ccd]['im'] for ccd in ccdplotorder]
    allpix = np.ma.array(ims).flatten()
    stretch = {
        'linear': vis.LinearStretch(),
        'histeq': vis.HistEqStretch(allpix),
        'asinh': vis.AsinhStretch(),
    }[stretch]
    if interval == 'zscale':
        iv = vis.ZScaleInterval(contrast=contrast)
        vmin, vmax = iv.get_limits(allpix)
    elif interval == 'rms':
        nsample = 1000 // nbin  # NOTE: `nbin` must be defined in the enclosing scope
        background = sigma_clip(allpix[::nsample], iters=3, sigma=2.2)
        m, s = background.mean(), background.std()
        vmin, vmax = m - losig * s, m + hisig * s
    elif interval == 'fixed':
        vmin, vmax = imrange
    else:
        raise ValueError
    norm = ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch)
    for n, (im, ccd) in enumerate(zip(ims, ccdplotorder)):
        if im.ndim == 3:
            im = im.mean(axis=-1)
        x = fov[ccd]['x']
        y = fov[ccd]['y']
        i = n % 2
        j = n // 2
        pos = [0.0225 + i * w + i * 0.04, 0.05 + j * h + j * 0.005, w, h]
        ax = fig.add_axes(pos)
        _im = ax.imshow(im,
                        origin='lower',
                        extent=[x[0, 0], x[0, -1], y[0, 0], y[-1, 0]],
                        norm=norm,
                        cmap=cmap,
                        interpolation=kwargs.get('interpolation', 'nearest'))
        if fov['coordsys'] == 'sky':
            ax.set_xlim(x.max(), x.min())
        else:
            ax.set_xlim(x.min(), x.max())
        ax.set_ylim(y.min(), y.max())
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        if n == 0:
            cb = fig.colorbar(_im, cax, orientation='horizontal')
            cb.ax.tick_params(labelsize=9)
    tstr = fov.get('file', '') + ' ' + fov.get('objname', '')
    title = kwargs.get('title', tstr)
    title = title[-60:]
    fig.text(0.5, 0.99, title, ha='center', va='top', size=12)
    if pngfn is not None:
        plt.savefig(pngfn)
        plt.close(fig)
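The interval/stretch machinery above, reduced to its core: pick display limits with ZScale and normalize with a chosen stretch (synthetic image; the asinh stretch is an arbitrary example):

import numpy as np
from astropy.visualization import ZScaleInterval, AsinhStretch, ImageNormalize

im = np.random.default_rng(4).normal(1000., 30., (256, 256))
vmin, vmax = ZScaleInterval(contrast=0.25).get_limits(im)
norm = ImageNormalize(vmin=vmin, vmax=vmax, stretch=AsinhStretch())
# then, e.g.: ax.imshow(im, origin='lower', norm=norm, cmap='viridis')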
Example #47
0
def plot_stats(ax,
               datas,
               percentiles=[1, 99],
               N_extrema=None,
               scale='log',
               **kwargs):
    ''' Plot a statistical summary similar, but not identical, to a box plot.
    A matplotlib boxplot of a large array (say, 10 MB of data) can contain so
    many outlier points that drawing it takes a very long time; this routine
    plots percentiles and a fixed number of extrema instead.

    Example
    -------
    >>> bias = CCDData.read("bias_bin11.fits")
    >>> dark_1s = CCDData.read("pdark_300s_27C_bin11.fits").data / 300
    >>> percentiles = [0.1, 1, 5, 95, 99, 99.9]
    >>> f, ax = plt.subplots(1, 1, figsize=(8,8))
    >>> ax, clipped_stats = stats.plot_stats(ax, [bias, dark_1s], percentiles=percentiles, N_extrema=5)
    >>> ax.set_ylabel("ADU")
    >>> ax.set_xticks(np.arange(4))
    >>> ax.set_xticklabels(['', 'Bias', 'Dark_1s', ''])
    >>> plt.tight_layout()
    >>> plt.savefig('bias_dark_analysis.png')
    '''

    datas = np.atleast_1d(datas)
    clipped_stats = []
    for i, data in enumerate(datas):
        x = i + 1
        s = give_stats(data=data, percentiles=percentiles, N_extrema=N_extrema)

        data_clipped = sigma_clip(data, **kwargs)
        n_rej = np.count_nonzero(data_clipped.mask)
        c_avg = np.mean(data_clipped)
        c_med = np.median(data_clipped)
        c_std = np.std(data_clipped, ddof=1)
        clipped_stats.append([c_avg, c_med, c_std])

        # avg, med, std from sigma-clipped data
        ax.errorbar(x,
                    c_avg,
                    yerr=c_std,
                    marker='_',
                    capsize=10,
                    markersize=10,
                    label=f"sig-clipped:\nN = {s['N'] - n_rej}")
        ax.scatter(x, c_med, color='r', marker='x')

        # percentiles, extrema, and zscale from original data
        ax.scatter([x - 0.1] * len(percentiles),
                   s["percentiles"],
                   color='k',
                   marker='>')
        ax.scatter([x + 0.1] * 2, [s["zmin"], s["zmax"]],
                   color='r',
                   marker='<')
        ax.plot(x, s["max"], color='k', marker='_', ms=20)
        ax.plot(x, s["min"], color='k', marker='_', ms=20)

        if N_extrema is not None:
            ax.scatter([x] * N_extrema, s["ext_lo"], color='k', marker='x')
            ax.scatter([x] * N_extrema, s["ext_hi"], color='k', marker='x')

    ax.set_xlim(0, len(datas) + 1)
    ax.set_yscale(scale)
    ax.legend()

    return ax, clipped_stats
Example #48
0
            print(len(aligned_images), 'images in channel', img_chl,
                  'are aligned')
            #combine images using aligned_images:
            mean_image = np.mean(aligned_images,
                                 axis=0).astype(dtype=np.uint16)
            cv2.imwrite(dir_name + '/mean_image_' + img_chl + '.png',
                        mean_image)
            print('Succeed in combining', len(imgs_to_align),
                  'images on channel', img_chl, '(average)')

            median_image = np.median(aligned_images,
                                     axis=0).astype(dtype=np.uint16)
            cv2.imwrite(dir_name + '/median_image_' + img_chl + '.png',
                        median_image)
            print('Succeed in combining', len(imgs_to_align),
                  'images on channel', img_chl, '(median)')

            sigma_clip_image = sigma_clip(aligned_images, sigma=3, \
                        sigma_lower=None, sigma_upper=None, iters=5, axis=None, copy=True)
            cv2.imwrite(dir_name + '/sigma_clip_image_' + img_chl + '.png',
                        sigma_clip_image[0])
            print('Succeed in combining', len(imgs_to_align),
                  'images on channel', img_chl, '(sigma clip)')

        except Exception as err:
            print('Error message .......')
            print(err)

else:
    print('There are no images for alignment')
Example #49
0
def norm_spectrum(spec, median_window=3, order=3):
    '''
    Normalize a spectrum

    Parameters:
    -----------
    spec:           specutils.Spectrum1D
        Spectrum to normalize
    median_window:  int()
        Window in Pixel used in median smoothing
    order:          int()
        Order of the polynomial used to find the continuum

    Returns:
    --------
    norm_spec:      specutils.Spectrum1D
        Normalized spectrum

    '''
    #   Regions that should not be used for continuum estimation,
    #   such as broad atmospheric absorption bands
    exclude_regions = [
        SpectralRegion(4295. * u.AA, 4315. * u.AA),
        #SpectralRegion(6860.* u.AA, 6880.* u.AA),
        SpectralRegion(6860. * u.AA, 6910. * u.AA),
        #SpectralRegion(7590.* u.AA, 7650.* u.AA),
        SpectralRegion(7590. * u.AA, 7680. * u.AA),
        SpectralRegion(9260. * u.AA, 9420. * u.AA),
        #SpectralRegion(11100.* u.AA, 11450.* u.AA),
        #SpectralRegion(13300.* u.AA, 14500.* u.AA),
    ]

    #   First estimate of the continuum
    #   -> will be too low for late-type stars because of the many absorption lines
    #   -> to limit execution time use simple LinearLSQFitter()
    #       -> reduces normalization accuracy
    _cont = fit_generic_continuum(
        spec,
        model=models.Chebyshev1D(order),
        fitter=fitting.LinearLSQFitter(),
        median_window=median_window,
        exclude_regions=exclude_regions,
    )(spec.spectral_axis)

    #   Normalize spectrum
    norm_spec = spec / _cont

    #   Sigma clip the normalized spectrum to remove spectral lines
    clip_flux = sigma_clip(
        norm_spec.flux,
        sigma_lower=1.25,
        sigma_upper=3.,
        axis=0,
        grow=1.,
    )

    #   Calculate mask
    mask = np.invert(clip_flux.recordmask)

    #   Make new spectrum
    spec_mask = Spectrum1D(
        spectral_axis=spec.spectral_axis[mask],
        flux=spec.flux[mask],
    )

    # Determine new continuum
    _cont = fit_generic_continuum(
        spec_mask,
        model=models.Chebyshev1D(order),
        fitter=fitting.LinearLSQFitter(),
        median_window=median_window,
        exclude_regions=exclude_regions,
    )(spec.spectral_axis)

    #   Normalize spectrum again
    norm_spec = spec / _cont

    return norm_spec, mad_std(norm_spec.flux)
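A minimal usage sketch with a synthetic spectrum; the continuum shape and the absorption line are invented for illustration (requires specutils and astropy.units):

import numpy as np
import astropy.units as u
from specutils import Spectrum1D

wave = np.linspace(4000., 9000., 2000) * u.AA
cont = 1e-13 * (wave.value / 6000.)**(-2.)                       # pseudo-continuum
line = 1. - 0.5 * np.exp(-0.5 * ((wave.value - 6563.) / 5.)**2)  # H-alpha-like dip
flux = cont * line * u.erg / (u.s * u.cm**2 * u.AA)

spec = Spectrum1D(spectral_axis=wave, flux=flux)
norm_spec, noise_estimate = norm_spectrum(spec)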
Example #50
0
def bias_checks(bias, overscan=False):
    i = 0
    rv = np.zeros(1,
                  dtype=[('fileName', 'S35'), ('sliceMeanAdu', 'f4', (16, )),
                         ('sliceRmsAdu', 'f4', (16, )),
                         ('sliceRangeAdu', 'f4', (16, )),
                         ('dropFlag', 'i4', (16, )),
                         ('residualMeanAdu', 'f4', (16, )),
                         ('residualRmsAdu', 'f4', (16, ))])
    fn = os.path.basename(bias).replace('.fz', '').replace('.fits', '')
    print('checking', fn)
    rv['fileName'][i] = fn
    fits = _open_fits(bias)
    if len(fits[1:]) != 16:
        print('ERROR: %s has %d img extensions' % (fn, len(fits[1:])))
        return rv
    for j, hdu in enumerate(fits[1:]):
        imNum = 'IM%d' % ampOrder[j]
        try:
            data = hdu.read().astype(np.float32)
            hdr = hdu.read_header()
        except Exception:
            print('ERROR: failed to read %s[%d]' % (fn, j + 1))
            continue
        if overscan:
            _data, oscan_cols, oscan_rows = extract_overscan(data, hdr)
            #colbias = fit_overscan(oscan_cols,**kwargs)
            if oscan_rows is not None:
                rowbias = fit_overscan(oscan_rows,
                                       along='rows',
                                       method='cubic_spline')
                cslice = sigma_clip(data[-22:-2:, 2:-22],
                                    iters=1,
                                    sigma=3.0,
                                    axis=0)
            else:
                cslice = None  # No row overscan to check
            bottomslice = data[5:10, -16:-2].mean(axis=0)
            middleslice = data[100:110, -16:-2].mean(axis=0)
        else:
            cslice = sigma_clip(data[1032:1048, 2:-22],
                                iters=1,
                                sigma=3.0,
                                axis=0)
            bottomslice = data[5:10, 1000:1014].mean(axis=0)
            middleslice = data[100:110, 1000:1014].mean(axis=0)
        if cslice is not None:
            cslice = cslice.mean(axis=0)
            cslice = gaussian_filter(cslice, 17)
            rv['sliceMeanAdu'][i, j] = cslice.mean()
            rv['sliceRmsAdu'][i, j] = cslice.std()
            rv['sliceRangeAdu'][i, j] = cslice.max() - cslice.min()
        if np.median(middleslice - bottomslice) > 15:
            print('found drop in', bias, j)
            rv['dropFlag'][i, j] = 1
        bias_residual = overscan_subtract(data, hdr)
        s = stats_region('amp_central_quadrant')
        mn, sd = array_stats(bias_residual[s],
                             method='mean',
                             rms=True,
                             clip_sig=5.0,
                             clip_iters=2)
        rv['residualMeanAdu'][i, j] = mn
        rv['residualRmsAdu'][i, j] = sd
    return rv
Example #51
0
spectrum_id = read["spectrum_id"]
order = read["order"]

# array that specifies if a pixel is already covered.
# to start, it should be all False
covered = np.zeros((len(wl),), dtype='bool')

# #average all of the spectra in the deque together
# residual_array = np.array(self.resid_deque)
# if len(self.resid_deque) == 0:
#     raise RuntimeError("No residual spectra stored yet.")
# else:
#     residuals = np.average(residual_array, axis=0)

# run the sigma_clip algorithm until converged, and we've identified the outliers
filtered_data = sigma_clip(residuals, sig=args.sigma, iters=None)
mask = filtered_data.mask

# sigma0 = config['region_priors']['sigma0']
# logAmp = config["region_params"]["logAmp"]
# sigma = config["region_params"]["sigma"]

# Sort in decreasing strength of residual
nregions = 0
mus = []

for w, resid in sorted(zip(wl[mask], np.abs(residuals[mask])), key=itemgetter(1), reverse=True):
    if w in wl[covered]:
        continue
    else:
        # check to make sure region is not *right* at the edge of the echelle order
Example #52
0
def compare_results(file_list, parameter_change_list, bary_starname,
                    orbital_parameters, objects, servaldir):
    """
    Compares result files to find the best rv scatter around literature fit returns change in parameter that yielded best results
    
    file_list: list of `str`
        list containing the file paths of the files to be compared
    parameter_change_list: list of `int`
        list containing the parameter exponent shifts used to create the files in file_list
    orbital_parameters : list of `float`
        orbital_parameters = [K, P, e, omega, T0]
        parameters of the keplerian fit to be used as "true" baseline
    """
    sigma_list = np.zeros(len(file_list)) + 100  # 100 is a fudge factor
    for f, fil in enumerate(file_list):
        #assumes the order of file_list and parameter_change_list are matched (maybe extract from file name?)
        wobble_res = h5py.File(fil, 'r')
        w_dates = wobble_res['dates'][()]
        w_dates_utc = wobble_res['dates_utc'][()]

        w_RVs = wobble_res['star_time_rvs'][()]
        w_RVs_original = w_RVs
        w_RVs_er = wobble_res['star_time_sigmas'][()]

        #barycorr for wobble_orig
        from scipy.constants import codata
        lightvel = codata.value('speed of light in vacuum')  #for barycorr
        # CAHA Coordinates for barycorr
        _lat = 37.2236
        _lon = -2.54625
        _elevation = 2168.

        w_RVs_original_barycorr = np.zeros(len(w_dates))
        for n in tqdm(range(len(w_RVs_original_barycorr))):
            w_RVs_original_barycorr[n] = bary.get_BC_vel(
                w_dates_utc[n],
                starname=bary_starname,
                lat=_lat,
                longi=_lon,
                alt=_elevation,
                zmeas=w_RVs_original[n] / lightvel)[0]

        #Serval Correction
        #read in SERVAL
        ser_rvc = np.loadtxt(servaldir + objects[1] + "/" + objects[1] +
                             ".rvc.dat")
        # remove entries with nan in drift
        ind_finitedrift = np.isfinite(ser_rvc[:, 3])
        ser_rvc = ser_rvc[ind_finitedrift]
        ser_corr = -ser_rvc[:, 8] - ser_rvc[:, 3]
        #match wobble and serval
        indices_serval = []
        indices_wobble = []
        for n in range(len(w_dates)):
            ind_jd = np.where(
                np.abs(ser_rvc[:, 0] - w_dates[n]) == np.nanmin(
                    np.abs(ser_rvc[:, 0] - w_dates[n])))[0][0]
            if np.abs(ser_rvc[ind_jd, 0] - w_dates[n]
                      ) * 24 * 60 < 20.:  #only take matches closer than 20 minutes
                indices_serval.append(ind_jd)
                indices_wobble.append(n)
        print("#serval_ind:" + str(len(indices_serval)),
              "#wobble_ind:" + str(len(indices_wobble)))
        #now set up all the data according to the indices
        ser_rvc = ser_rvc[indices_serval]
        ser_corr = ser_corr[indices_serval]

        w_dates = w_dates[indices_wobble]
        w_dates_utc = w_dates_utc[indices_wobble]
        w_RVs_original_barycorr = w_RVs_original_barycorr[
            indices_wobble] + ser_corr
        w_RVs_er = w_RVs_er[indices_wobble]

        def fit_func(t, T0_offset):
            return rv.radial_velocity(t, orbital_parameters[0],
                                      orbital_parameters[1],
                                      orbital_parameters[2],
                                      orbital_parameters[3],
                                      orbital_parameters[4] + T0_offset)

        #fit to Wobble
        xdata = w_dates
        ydata = w_RVs_original_barycorr - np.nanmean(w_RVs_original_barycorr)
        popt, pcov = sp.optimize.curve_fit(fit_func,
                                           xdata,
                                           ydata,
                                           sigma=w_RVs_er,
                                           absolute_sigma=True)
        print("T0_offset Wobble = ", popt)
        T0_offset = popt[0]

        #make these weighted (maybe: this may not be a good idea if residuals are not strongly correlated with error (as with wobble results))
        sigma_wob = np.nanstd(
            sigma_clip(w_RVs_original_barycorr -
                       np.nanmean(w_RVs_original_barycorr) -
                       fit_func(w_dates, T0_offset),
                       sigma=5))
        sigma_list[f] = sigma_wob

        #TODO include some nice progress plots

    best_index = np.argmin(sigma_list)
    return parameter_change_list[best_index]
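The scatter metric used above can be exercised on its own: sigma-clip the residuals from the fit, then take the standard deviation of the surviving points. A minimal sketch with synthetic residuals (the numbers are illustrative, not from a real data set):

import numpy as np
from astropy.stats import sigma_clip

rng = np.random.default_rng(0)
residuals = rng.normal(0.0, 2.0, 200)  # well-behaved RV residuals, ~2 m/s scatter
residuals[10] = 150.0                  # one bad epoch

clipped = sigma_clip(residuals, sigma=5)
# fill clipped points with NaN so nanstd ignores them; np.nanstd applied
# directly to a masked array may not respect the mask
sigma_wob = np.nanstd(clipped.filled(np.nan))
print(sigma_wob)  # close to 2.0 despite the outlier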
Example #53
0
    def mask_outliers(self, data, n_sigma=3, *args):
        return sigma_clip(data, n_sigma).mask
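Used standalone (outside the class), the same one-liner returns a boolean mask that is True where points were clipped; a quick sketch:

import numpy as np
from astropy.stats import sigma_clip

data = np.array([0.9, 1.0, 1.1, 0.95, 1.05, 1.02,
                 0.98, 1.03, 0.97, 1.0, 50.0])  # one obvious outlier
mask = sigma_clip(data, 3).mask  # n_sigma passed positionally, as above
print(mask)  # last element True, all others False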
Example #54
0
    def trace_mismatch(self, maxsep=None, sigma=None, minmax=None, synced=False):
        """
        Return the mismatches between the mask and trace positions.

        Based on the best-fitting (or fixed) offset and scale
        parameters, :func:`match` is executed, forcing the
        slit-mask and trace positions pairs to be uniquely matched.

        The set of slit-mask positions without a matching trace are
        identified by finding those slits in the range relevant to
        the list of trace coordinates (see `minmax`), but without a
        matching trace index.

        .. todo::
            explain synced adjustment

        Mask-to-trace matches are identified as "bad" if they meet any
        of the following criteria:

            - The trace has not been masked (see :attr:`trace_mask`)
            - A unique match could not be found (see :func:`match`)
            - The absolute value of the separation is larger than the
              provided `maxsep` (when `maxsep` is not None).
            - The separation is rejected by a sigma-clipping (see
              `sigma`)

        Note that there is currently no argument that disables the
        determination of bad traces. However, bad traces are simply
        returned by the method; this function changes none of the
        class attributes.

        Args:
            maxsep (:obj:`float`, optional):
                The maximum allowed separation between the calibrated
                coordinates of the designed slit position in pixels
                and the matched trace. If None, use :attr:`maxsep`;
                see :func:`find_best_match`.
            sigma (:obj:`float`, optional):
                The sigma value to use for rejection. If None, use
                :attr:`sigma`; see :func:`find_best_match`.
            minmax (array-like, optional):
                A two-element array with the minimum and maximum
                coordinate value to match to the trace data. If None,
                this is determined from :attr:`trace_spat` and the
                standard deviation of the fit residuals.
            synced (:obj:`bool`, optional):
                The mask coordinates being matched to are synced
                left-to-right in adjacent pairs; i.e., the indices of
                left edges are all even and the indices of right edges
                are all odd.
        
        Returns:
            Two `numpy.ndarray`_ objects are returned: (1) the
            indices of mask positions without a matching trace
            position and (2) the list of trace positions identified
            as "bad."
        """
        # Check parameters are available
        if self.par is None:
            raise ValueError('No parameters are available.')

        # Set the parameters using the relevant attributes if not provided directly
        if maxsep is None:
            maxsep = self.maxsep
        if sigma is None:
            sigma = self.sigma

        # Get the coordinates and the matching indices: always use the
        # existing parameters and force the matches to be unique.
        _match_coo, _match_separation, _match_index = self.match(unique=True)

        # Selection of positions included in the fit with valid match
        # indices
        gpm = numpy.invert(self.trace_mask) & (_match_index >= 0)

        # Find the overlapping region between the trace and slit-mask
        # coordinates
        if minmax is None:
            stddev = numpy.std(_match_separation[gpm])
            _minmax = numpy.array([numpy.amin(self.trace_spat) - 3*stddev,
                                  numpy.amax(self.trace_spat) + 3*stddev])
        else:
            _minmax = numpy.atleast_1d(minmax)
            if _minmax.size != 2:
                raise ValueError('`minmax` must be a two-element array.')
        overlap = (_match_coo > _minmax[0]) & (_match_coo < _minmax[1])

        # Find any large separations
        bad = self.trace_mask | (_match_index < 0)
        if maxsep is None:
            diff = numpy.ma.MaskedArray(_match_separation, mask=bad)
            kwargs = {} if sigma is None else {'sigma': sigma}
            bad = numpy.ma.getmaskarray(sigma_clip(data=diff, **kwargs))
        else:
            bad[gpm] = numpy.absolute(_match_separation[gpm]) > maxsep

        if synced:
            # Get the union of all indices with good or missing matches
            indx = numpy.array(list(set(numpy.append(numpy.where(overlap)[0],
                                                     _match_index[gpm & numpy.invert(bad)]))))
            # If the coordinates are left-right synchronized, there
            # should be an even number of indices, and adjacent pairs
            # in the sorted sequence should always differ by one
            unsynced = numpy.where(numpy.diff(indx)[::2] != 1)[0]*2
            if len(unsynced) != 0:
                offset = numpy.ones(len(unsynced), dtype=int)
                offset[indx[unsynced] % 2 == 1] = -1
                overlap[indx[unsynced]+offset] = True
            # Make sure the last index is paired
            if indx[-1] % 2 == 0:
                # Add a right
                overlap[indx[-1]+1] = True

        # Use these to construct the list of missing traces and those
        # that are unmatched to the mask
        return numpy.array(list(set(numpy.where(overlap)[0])
                             - set(_match_index[gpm & numpy.invert(bad)]))), \
                    numpy.where(bad & numpy.invert(self.trace_mask))[0]
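The sigma-clipping rejection step can be tried in isolation: wrap the separations in a masked array so already-bad entries are excluded from the statistics, then let sigma_clip grow the mask. A small sketch with made-up separations (assuming sigma_clip comes from astropy.stats, as elsewhere in these examples):

import numpy
from astropy.stats import sigma_clip

separation = numpy.array([0.12, -0.21, 0.05, -0.08, 0.17,
                          -0.02, 0.09, -0.14, 0.03, 12.0])
bad = numpy.zeros(separation.size, dtype=bool)
bad[0] = True  # pretend this trace was already masked

diff = numpy.ma.MaskedArray(separation, mask=bad)
bad = numpy.ma.getmaskarray(sigma_clip(data=diff, sigma=3.0))
print(bad)  # the 12.0 separation is now flagged alongside the pre-masked entry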
Example #55
0
def find_peaks(flux,
               window=51,
               niter=5,
               clip_iter=5,
               clip_sigma_upper=5.0,
               clip_sigma_lower=5.0,
               detection_sigma=3.0,
               min_peak_dist_sigma=5.0,
               gaussian_width=1.0,
               make_fig=False):
    """
    * Subtract median filter (param "window")
    * Iterate: (param "niter")
        * Sigma clip, estimate noise (params clip_iter, clip_sigma_upper, clip_sigma_lower)
        * Find peaks (param detection_sigma)
        * Remove peaks too close to previous (param min_peak_dist_sigma)
        * Fit Gaussians to peaks (initialize width at param gaussian_width)
    Returns:
        allpeakx: locations of peaks
        fullmodel: the model of all the gaussians
        If make_fig=True: fig, a plot showing all the peaks found at each iteration.
    """
    # This is the data we will try to fit with a
    # combination of Gaussians
    xarr = np.arange(len(flux))
    flux = flux - signal.medfilt(flux, window)
    continuum = models.Linear1D(slope=0, intercept=0)
    fullmodel = continuum

    allpeakx = []
    allpeaksigma = []

    fitter = fitting.LevMarLSQFitter()
    if make_fig: fig, axes = plt.subplots(niter)
    for iiter in range(niter):
        # Subtract existing peaks
        tflux = flux - fullmodel(xarr)
        # Estimate noise
        cflux = sigma_clip(tflux,
                           iters=clip_iter,
                           sigma_upper=clip_sigma_upper,
                           sigma_lower=clip_sigma_lower)
        noise = np.std(cflux)
        # Find peaks in residual using gradient = 0
        # Only keep peaks above detection threshold
        deriv = np.gradient(tflux)
        peaklocs = (deriv[:-1] >= 0) & (deriv[1:] < 0) & \
            (tflux[:-1] > detection_sigma * noise)
        peakx = np.where(peaklocs)[0]
        peaky = flux[:-1][peaklocs]
        # Prune peaks too close to existing peaks
        peaks_to_keep = np.ones_like(peakx, dtype=bool)
        for ix, x in enumerate(peakx):
            z = (x - np.array(allpeakx)) / np.array(allpeaksigma)
            if np.any(np.abs(z) < min_peak_dist_sigma):
                peaks_to_keep[ix] = False
        peakx = peakx[peaks_to_keep]
        peaky = peaky[peaks_to_keep]

        # Add new peaks to the model
        for x, y in zip(peakx, peaky):
            g = models.Gaussian1D(amplitude=y, mean=x, stddev=gaussian_width)
            fullmodel = fullmodel + g
        print("iter {}: {} peaks (found {}, added {})".format(
            iiter,
            fullmodel.n_submodels() - 1, len(peaks_to_keep), len(peakx)))
        # Fit the full model
        fullmodel = fitter(fullmodel,
                           xarr,
                           flux,
                           maxiter=200 * (fullmodel.parameters.size + 1))
        print(fitter.fit_info["message"], fitter.fit_info["ierr"])
        # Extract peak x and sigma
        peak_x_indices = np.where(
            ["mean_" in param for param in fullmodel.param_names])[0]
        peak_y_indices = peak_x_indices - 1
        peak_sigma_indices = peak_x_indices + 1
        allpeakx = fullmodel.parameters[peak_x_indices]
        allpeaky = fullmodel.parameters[peak_y_indices]
        allpeaksigma = fullmodel.parameters[peak_sigma_indices]
        # Make a plot
        if make_fig:
            try:
                ax = axes[iiter]
            except TypeError:
                # axes is a single Axes object when niter == 1
                ax = axes
            ax.plot(xarr, flux)
            ax.plot(peakx, peaky, 'ro')
            ax.plot(xarr, fullmodel(xarr), lw=1)
            ax.axhspan(-noise, +noise, color='k', alpha=.2)
            ax.plot(xarr, flux - fullmodel(xarr))
            ax.vlines(allpeakx,
                      allpeaky * 1.1,
                      allpeaky * 1.1 + 300,
                      color='r',
                      lw=1)
    if make_fig: return allpeakx, fullmodel, fig
    return allpeakx, fullmodel
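The noise estimate at the heart of the loop above is an asymmetric sigma clip followed by a standard deviation. A standalone sketch, assuming a recent astropy where the keyword is maxiters rather than the iters used above:

import numpy as np
from astropy.stats import sigma_clip

rng = np.random.default_rng(0)
tflux = rng.normal(0.0, 2.0, 500)  # residual flux after model subtraction
tflux[100] = 80.0                  # an emission-line-like spike

cflux = sigma_clip(tflux, maxiters=5, sigma_upper=5.0, sigma_lower=5.0)
noise = np.std(cflux)  # the masked-array std ignores the clipped spike
print(noise)           # close to the true value of 2.0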
Example #56
0
    def findLine(self):

        img = fits.getdata(self.fullPath).astype(np.float32)
        img[img < 0] = 0
        img0 = fits.getdata(self.fullPath0).astype(np.float32)
        img0[img0 < 0] = 0
        imgDiff = img.copy() - img0.copy()
        #imgDiff[imgDiff<0]=0
        #new_hdu = fits.PrimaryHDU(imgDiff)
        #new_hdu.writeto("f:/aa.fit")
        '''
        zimg = img.copy().astype(np.int)
        zmin, zmax = zscale_image(zimg)
        zimg[zimg>zmax] = zmax
        zimg[zimg<zmin] = zmin
        if zmin==zmax:
            self.lines = np.array([])
            return;
        
        zimg=(((zimg-zmin)/(zmax-zmin))*255).astype(np.uint8)
        '''
        '''
        Image.fromarray(zimg).save("f:/aa.png")
        zimg = imgDiff.copy().astype(np.int)
        zmin, zmax = zscale_image(zimg)
        zimg[zimg>zmax] = zmax
        zimg[zimg<zmin] = zmin
        zimg=(((zimg-zmin)/(zmax-zmin))*255).astype(np.uint8)
        Image.fromarray(zimg).save("f:/aa1.png")
        return
        '''

        zimgDiff = imgDiff.copy()
        zimgDiff[zimgDiff < 0] = 0
        zimgDiff = cv2.medianBlur(zimgDiff, 3)  #medianBlur  blur

        tmin = zimgDiff.min()
        tmax = zimgDiff.max()

        if tmin == tmax:
            self.lines = np.array([])
            return

        zimgDiff = (((zimgDiff - tmin) / (tmax - tmin)) * 255.0).astype(
            np.uint8)
        #Image.fromarray(zimgDiff).save(r"E:\work\program\python\OTSimulation\meteor-find\aa1.png")

        self.img = img
        self.zimg = img
        self.imgDiff = imgDiff
        self.zimgDiff = zimgDiff

        self.imgH = img.shape[0]
        self.imgW = img.shape[1]

        #drop image border 20pix
        #zimgDiff[0:self.borderWidth, :] = 0
        #zimgDiff[:, 0:self.borderWidth] = 0
        #zimgDiff[self.imgH-self.borderWidth:self.imgH, :] = 0
        #zimgDiff[:, self.imgW-self.borderWidth:self.imgW] = 0

        imgAvg = np.mean(zimgDiff)
        imgRms = np.std(zimgDiff)
        thred1 = imgAvg + 3 * imgRms
        self.imgAvg1 = imgAvg
        self.imgRms1 = imgRms
        self.thred1 = thred1
        print("avg1=%f;rms1=%f;thred1=%f" % (imgAvg, imgRms, thred1))

        #new_hdu = fits.PrimaryHDU(zimgDiff)
        #new_hdu.writeto("f:/ab.fit")

        timg1 = zimgDiff[zimgDiff > thred1]
        if len(timg1) == 0:
            self.lines = np.array([])
            return

        filtered_data = sigma_clip(timg1, sigma=3, iters=2, copy=False)
        imgbg = filtered_data.data[~filtered_data.mask]

        imgAvg = np.average(imgbg)
        imgRms = np.std(imgbg)
        #thred2 = imgAvg + 3 * imgRms
        thred2 = imgAvg + 2 * imgRms
        self.imgAvg2 = imgAvg
        self.imgRms2 = imgRms
        self.thred2 = thred2
        print("avg1=%f;rms1=%f;thred2=%f" % (imgAvg, imgRms, thred2))

        thred2 = (145 if thred2 > 145 else thred2)

        #flag,bimg = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
        flag, bimg = cv2.threshold(zimgDiff, thred2, 255, cv2.THRESH_BINARY)
        #Image.fromarray(bimg).save("f:/aa1.png")

        rho = 1
        theta = np.pi / 180
        #lines = cv2.HoughLinesP(bimg, rho = rho, theta = theta, threshold = 10,
        #                        minLineLength = 50,maxLineGap = self.maxLineGap)
        # search for dark trails
        #lines = cv2.HoughLinesP(bimg, rho = rho, theta = theta, threshold = 20,
        #        minLineLength = 50,maxLineGap = 20)
        # search for bright trails / bright meteors
        lines = cv2.HoughLinesP(bimg,
                                rho=rho,
                                theta=theta,
                                threshold=50,
                                minLineLength=50,
                                maxLineGap=self.maxLineGap)

        while lines is None and thred2 > 100:
            #flag,bimg = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
            flag, bimg = cv2.threshold(zimgDiff, thred2, 255,
                                       cv2.THRESH_BINARY)
            lines = cv2.HoughLinesP(bimg,
                                    rho=rho,
                                    theta=theta,
                                    threshold=50,
                                    minLineLength=50,
                                    maxLineGap=self.maxLineGap)

            if lines is None:
                thred2 = thred2 - 5

        lines = (np.array([]) if lines is None else lines)
        print("thred1: %f thred2: %f lines: %d" %
              (thred1, thred2, lines.shape[0]))

        self.bimg = bimg
        self.lines = lines
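The two-stage threshold above boils down to: clip the bright tail off the pixel distribution, then set the cut at mean + 2*rms of what survives. A hedged sketch of just that step, again using maxiters in place of the deprecated iters keyword used above:

import numpy as np
from astropy.stats import sigma_clip

rng = np.random.default_rng(0)
pixels = np.concatenate([rng.normal(30.0, 5.0, 10000),  # sky background
                         np.full(50, 255.0)])           # a bright trail

filtered = sigma_clip(pixels, sigma=3, maxiters=2)
imgbg = filtered.data[~filtered.mask]  # surviving background pixels

thred2 = np.average(imgbg) + 2 * np.std(imgbg)
print(thred2)  # roughly 30 + 2*5 = 40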
Example #57
0
def rescale_snr(specwave,
                flux=None,
                ivar=None,
                x1=None,
                x2=None,
                cont=None,
                cont_kernel=51,
                cont_Niter=3,
                make_fig=False,
                **sigma_clip_kwargs):
    """
    Take the biweight standard deviation of x_i/sigma_i of the sigma-clipped
    data, then rescale ivar so that this standard deviation is 1.
    Returns the rescaled Spectrum1D object and the noise factor
    (plus the figure as well when make_fig=True).
    """
    if flux is None and ivar is None:
        assert isinstance(specwave, Spectrum1D)
        spec = specwave
        wave, flux, ivar = spec.dispersion, spec.flux, spec.ivar
        meta = spec.metadata
    else:
        wave = specwave
        assert len(wave) == len(flux)
        assert len(wave) == len(ivar)
        meta = OrderedDict({})
    if cont is None:
        cont = fast_find_continuum(flux, cont_kernel, cont_Niter)
    else:
        assert len(cont) == len(flux)
    errs = ivar**-0.5
    errs[errs > 10 * flux] = np.nan

    iirescale = np.ones_like(wave, dtype=bool)
    if x1 is not None: iirescale = iirescale & (wave > x1)
    if x2 is not None: iirescale = iirescale & (wave < x2)

    norm = flux[iirescale] / cont[iirescale]
    normerrs = errs[iirescale] / cont[iirescale]  # subset errs to match norm

    z = (norm - 1.) / normerrs
    clipped = sigma_clip(z[np.isfinite(z)], **sigma_clip_kwargs)
    noise = biweight_scale(clipped[~clipped.mask])
    print("Noise is {:.2f} compared to 1.0".format(noise))

    new_ivar = ivar / (noise**2.)

    outspec = Spectrum1D(wave, flux, new_ivar, meta)
    if make_fig:
        newz = z / noise
        newerrs = errs * noise
        newnormerrs = normerrs * noise

        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(2, 3, figsize=(12, 6))
        ax = axes[0, 0]
        ax.plot(wave, flux)
        ax.plot(wave, errs)
        ax.plot(wave, newerrs)
        ax.plot(wave, cont, color='k', ls=':')
        ax.set_xlabel('wavelength')
        ax.set_ylabel('counts')
        ax = axes[1, 0]
        ax.plot(wave, norm)
        ax.plot(wave, normerrs)
        ax.plot(wave, newnormerrs)
        ax.axhline(1, color='k', ls=':')
        ax.set_xlabel('wavelength')
        ax.set_ylabel('norm')
        ax.set_ylim(0, 1.2)
        ax = axes[1, 1]
        ax.plot(wave, z)
        ax.plot([np.nan], [np.nan])  # hack to get the right color
        ax.plot(wave, newz)
        ax.axhline(0, color='k', ls=':')
        ax.set_xlabel('wavelength')
        ax.set_ylabel('z')
        ax.set_ylim(-7, 7)

        ax = axes[0, 1]
        bins = np.linspace(-7, 7, 100)
        binsize = np.diff(bins)[1]
        ax.plot(bins,
                norm_distr.pdf(bins) * np.sum(np.isfinite(z)) * binsize,
                color='k')
        ax.hist(z[np.isfinite(z)], bins=bins)
        ax.hist(clipped[~clipped.mask], bins=bins, histtype='step')
        ax.hist(newz[np.isfinite(newz)], bins=bins, histtype='step')
        ax.set_xlabel('z')
        ax.set_xlim(-7, 7)

        ax = axes[0, 2]
        zfinite = z.copy()
        zfinite[~np.isfinite(zfinite)] = 0.
        autocorr = np.correlate(zfinite, zfinite, mode="same")
        ax.plot(np.arange(len(flux)), autocorr, '.-')
        ax.axvline(len(flux) // 2)
        ax.set_xlim(
            len(flux) // 2 - 10,
            len(flux) // 2 + 10,
        )
        ax.set_xlabel("pixel")
        ax.set_ylabel("autocorrelation(z)")

        z1, z2 = -10, 10
        zarr1 = np.zeros((len(z) - 1, 2))
        zarr1[:, 0] = z[:-1]
        zarr1[:, 1] = z[1:]
        zarr1 = zarr1[np.sum(np.isfinite(zarr1), axis=1) == 2]
        zarr2 = np.zeros((len(z) - 2, 2))
        zarr2[:, 0] = z[:-2]
        zarr2[:, 1] = z[2:]
        zarr2 = zarr2[np.sum(np.isfinite(zarr2), axis=1) == 2]
        #ax = axes[0,2]
        #ax.plot([z1,z2],[z1,z2],'k:')
        #ax.plot(z[:-1], z[1:], '.', alpha=.3)
        #ax.set_title("r={:+.2}".format(pearsonr(zarr1[:,0],zarr1[:,1])[0]))
        #ax.set_xlabel("z(pixel)"); ax.set_ylabel("z(pixel+1)")
        #ax.set_xlim(z1,z2); ax.set_ylim(z1,z2)

        ax = axes[1, 2]
        ax.plot([z1, z2], [z1, z2], 'k:')
        ax.plot(z[:-2], z[2:], '.', alpha=.3)
        ax.set_title("r={:+.2}".format(pearsonr(zarr2[:, 0], zarr2[:, 1])[0]))
        ax.set_xlabel("z(pixel)")
        ax.set_ylabel("z(pixel+2)")
        ax.set_xlim(z1, z2)
        ax.set_ylim(z1, z2)

        fig.tight_layout()

        return fig, outspec, noise

    return outspec, noise
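The core of the rescaling is three lines: form z = (norm - 1)/sigma, measure its robust width, and divide the inverse variance by that width squared. A minimal sketch with synthetic data in place of a real spectrum:

import numpy as np
from astropy.stats import sigma_clip, biweight_scale

rng = np.random.default_rng(0)
ivar = np.full(1000, 4.0)                 # claimed sigma = 0.5
flux = 1.0 + rng.normal(0.0, 0.75, 1000)  # true sigma = 0.75

z = (flux - 1.0) * np.sqrt(ivar)          # (norm - 1) / sigma
clipped = sigma_clip(z[np.isfinite(z)])
noise = biweight_scale(clipped[~clipped.mask])
print(noise)                              # ~1.5: errors were 1.5x too small

new_ivar = ivar / noise**2                # z / noise now has unit scatter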
Example #58
0
        print('Total of ', numtotint, ' integrations for this filter')
        if numtotint < 10:
            numsigma = 5
            meddata3d = np.median(normdata3d, axis=2, keepdims=True)
            datadiffs = normdata3d - meddata3d
            datadiffsdiverr = datadiffs / normerr3d
            datadiffsdiverrmasked = np.ma.masked_greater_equal(
                datadiffsdiverr, numsigma)
            clippeddata3d = np.ma.masked_array(normdata3d,
                                               datadiffsdiverrmasked.mask)
            clippederr3d = np.ma.masked_array(normerr3d, clippeddata3d.mask)
        else:
            numsigma = 3.0
            clippeddata3d = sigma_clip(normdata3d,
                                       sigma=numsigma,
                                       maxiters=2,
                                       axis=2,
                                       cenfunc='median')
            clippederr3d = np.ma.masked_array(normerr3d, clippeddata3d.mask)
        #For data array use mean of clipped array
        meanclippeddata = np.ma.mean(clippeddata3d, axis=2)
        #For error array add errors in quadrature and divide by number of unmasked samples
        meanclippederr = (1.0 / (np.sum(
            (~clippederr3d.mask), axis=2))) * (np.ma.sum(
                (clippederr3d**2), axis=2))**0.5

        #Patch unusual blob regions in some filter images, replacing with the F200W flat that doesn't show them.
        if filterdirlist[l] != 'F200W':
            hdulistf200w = fits.open('jwst_niriss_cv3_imageflat_F200W.fits')
            dataf200w = hdulistf200w['SCI'].data
            #These are x,y positions for photutils
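The stacking recipe in this fragment (sigma-clip along the integration axis, average the survivors, propagate errors in quadrature over the unmasked samples) can be tried on a toy cube:

import numpy as np
from astropy.stats import sigma_clip

rng = np.random.default_rng(0)
normdata3d = rng.normal(1.0, 0.01, (4, 4, 20))  # (y, x, integration)
normerr3d = np.full_like(normdata3d, 0.01)
normdata3d[0, 0, 5] = 10.0                      # a cosmic-ray-like hit

clippeddata3d = sigma_clip(normdata3d, sigma=3.0, maxiters=2,
                           axis=2, cenfunc='median')
clippederr3d = np.ma.masked_array(normerr3d, clippeddata3d.mask)

meanclippeddata = np.ma.mean(clippeddata3d, axis=2)
# quadrature sum of errors over the unmasked samples, divided by their count
meanclippederr = (1.0 / np.sum(~clippederr3d.mask, axis=2)) * \
                 np.ma.sum(clippederr3d**2, axis=2)**0.5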
Example #59
0
def master_ff(flattype, masterb, masterf):
    """ Produces a master flat that has been bias corrected	"""
    flat_fn = []

    for file in glob.glob('*.fits'):
        hdulist = fits.open(file)
        header = hdulist[0].header

        if flattype == 'IMSKY':

            if (header['object'] == 'SKY,FLAT'
                    and header['HIERARCH ESO DPR TECH'] == 'IMAGE'):
                flat_fn.append(file)

        if flattype == 'POLDOME':

            if (header['object'] == 'DOME'
                    and header['HIERARCH ESO DPR TECH'] == 'POLARIMETRY'):
                flat_fn.append(file)

        hdulist.close()

    print("Number of flat frames in directory:", len(flat_fn))
    fstack_data = np.zeros([len(flat_fn), 1024, 1024])
    mblist = fits.open(masterb)
    mbdata = mblist[0].data
    mblist.close()
    i = 0

    for file in flat_fn:
        hdulist = fits.open(file)
        data = hdulist[0].data
        header = hdulist[0].header

        if flattype == 'IMSKY':

            if (np.shape(data) == (1030, 1030) and header['CDELT1'] == 2
                    and header['CDELT2'] == 2):  # 2x2 binning on both axes
                bc_data = data[:1024, :1024] - mbdata
                clipped_data = sigma_clip(bc_data,
                                          sigma_upper=3,
                                          sigma_lower=104)
                bc_data[clipped_data.mask] = np.nan
                fstack_data[i, :, :] = bc_data
                i += 1

        if flattype == 'POLDOME':

            if (np.shape(data) == (1030, 1030) and header['CDELT1'] == 2
                    and header['CDELT2'] == 2):  # 2x2 binning on both axes
                bc_data = data[:1024, :1024] - mbdata
                fstack_data[i, :, :] = bc_data
                i += 1

        hdulist.close()

    print("Number of flat frames used in master flat:", i)

    if i > 0:
        fstack_sum = np.nanmean(fstack_data, axis=0)
        fmedian = np.nanmedian(fstack_sum)
        fstack_sum[np.isnan(fstack_sum)] = fmedian
        fstack_sum[fstack_sum <= 0] = fmedian
        flat_array = fstack_sum / fmedian
        fstd = np.nanstd(flat_array)

        hdu = fits.PrimaryHDU(flat_array)

        if flattype == 'IMSKY':
            hdu.header['object'] = 'MASTER SKY,FLAT'
            hdu.header['obstech'] = 'IMAGE'

        if flattype == 'POLDOME':

            hdu.header['object'] = 'MASTER DOME,FLAT'
            hdu.header['obstech'] = 'POLARIMETRY'

        hdu.header['frames'] = len(flat_fn)
        hdu.header['median'] = 1
        hdu.header['std'] = fstd
        hdu.header['naxis1'] = 1024
        hdu.header['naxis2'] = 1024
        hdu.header['binning'] = 2

        hdulist = fits.HDUList([hdu])
        hdulist.writeto(masterf)
        return 0

    else:
        raise FileNotFoundError("No applicable flat frames")
Example #60
0
def A_photometry(image_data,
                 bg_err,
                 factor=1,
                 ape_sum=None,
                 ape_sum_err=None,
                 cx=15,
                 cy=15,
                 r=2.5,
                 a=5,
                 b=5,
                 w_r=5,
                 h_r=5,
                 theta=0,
                 shape='Circular',
                 method='center'):
    '''
    Performs aperture photometry: it first creates the aperture (Circular,
    Rectangular, or Elliptical), then sums up the flux that falls into the
    aperture.

    Parameters
    ==========

    image_data: 3D array
        Data cube of images (2D arrays of pixel values).

    bg_err   : 1D array
        Array of uncertainties on pixel value.

    factor   : float (optional)
        Electron count to photon count factor. Default is 1 if none given.

    ape_sum  : 1D array (optional)
        Array of flux to append new flux values to. If None, the new
        values will be appended to an empty array.

    ape_sum_err: 1D array (optional)
        Array of flux uncertainty to append new flux uncertainty values
        to. If None, the new values will be appended to an empty array.

    cx       : float or 1D array (optional)
        x-coordinate of the center of the aperture. Dimension must be
        equal to the dimension of cy. Default is 15.

    cy       : float or 1D array (optional)
        y-coordinate of the center of the aperture. Default is 15.

    r        : int (optional)
        If shape is 'Circular', the radius of the circular aperture.
        Default is 2.5.

    a        : int (optional)
        If shape is 'Elliptical', the semi-major axis of the elliptical
        aperture (x-axis). Default is 5.

    b        : int (optional)
        If shape is 'Elliptical', the semi-minor axis of the elliptical
        aperture (y-axis). Default is 5.

    w_r      : int (optional)
        If shape is 'Rectangular', the full width of the rectangular
        aperture (x-axis). Default is 5.

    h_r      : int (optional)
        If shape is 'Rectangular', the full height of the rectangular
        aperture (y-axis). Default is 5.

    theta    : int (optional)
        If shape is 'Elliptical' or 'Rectangular', the rotation angle of
        the semimajor axis in radians, measured from the positive x axis.
        The rotation angle increases counterclockwise. Default is 0.

    shape    : string object (optional)
        The shape of the aperture. Possible aperture shapes are
        'Circular', 'Elliptical', 'Rectangular'. Default is 'Circular'.

    method   : string object (optional)
        The method used to determine the overlap of the aperture on the
        pixel grid. Possible methods are 'exact', 'subpixel', 'center'.
        Default is 'center'.

    Returns
    -------
    ape_sum  : 1D array
        Array of flux with new flux appended.

    ape_sum_err: 1D array
        Array of flux uncertainties with new flux uncertainties appended.
    '''
    # create fresh lists when none are supplied (avoids the shared
    # mutable-default-argument pitfall)
    if ape_sum is None:
        ape_sum = []
    if ape_sum_err is None:
        ape_sum_err = []
    l, h, w = image_data.shape
    # central position of aperture
    if isinstance(cx, list):
        position = np.c_[cx, cy]
    else:
        position = np.c_[(cx * np.ones(l)), (cy * np.ones(l))]
    tmp_sum = []
    tmp_err = []
    # performing aperture photometry
    for i in range(l):
        if (shape == 'Circular'):
            aperture = CircularAperture(position[i], r=r)
        elif (shape == 'Elliptical'):
            aperture = EllipticalAperture(position[i], a=a, b=b, theta=theta)
        elif (shape == 'Rectangular'):
            aperture = RectangularAperture(position[i],
                                           w=w_r,
                                           h=h_r,
                                           theta=theta)
        data_error = calc_total_error(image_data[i, :, :],
                                      bg_err[i],
                                      effective_gain=1)
        phot_table = aperture_photometry(image_data[i, :, :],
                                         aperture,
                                         error=data_error,
                                         pixelwise_error=False,
                                         method=method)
        tmp_sum.extend(phot_table['aperture_sum'] * factor)
        tmp_err.extend(phot_table['aperture_sum_err'] * factor)
    # removing outliers
    tmp_sum = sigma_clip(tmp_sum, sigma=4, iters=2, cenfunc=np.ma.median)
    tmp_err = sigma_clip(tmp_err, sigma=4, iters=2, cenfunc=np.ma.median)
    ape_sum.extend(tmp_sum)
    ape_sum_err.extend(tmp_err)
    return ape_sum, ape_sum_err
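A hypothetical call, assuming a photutils version matching the API used above (pixelwise_error was removed in later releases) and the astropy version whose sigma_clip still accepts iters. Fresh lists are passed explicitly; with the None defaults above, omitting them also yields fresh lists:

import numpy as np

image_data = np.ones((10, 32, 32))  # ten frames of flat signal
bg_err = np.full(10, 0.1)           # per-frame background uncertainty

flux, flux_err = A_photometry(image_data, bg_err,
                              ape_sum=[], ape_sum_err=[],
                              cx=15, cy=15, r=2.5,
                              shape='Circular', method='center')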