def test_extract_with_noise_array():

    # Get some background-subtracted test data:
    data = np.copy(image_data)
    bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
    bkg.subfrom(data)

    # Ensure that extraction with a constant noise array gives the expected
    # result. We have to use conv=None here because the results are *not*
    # the same when convolution is on! This is because the noise map is
    # convolved. Near edges, the convolution doesn't adjust for pixels
    # off edge boundaries. As a result, the convolved noise map is not
    # all ones.
    objects = sep.extract(data, 1.5 * bkg.globalrms, filter_kernel=None)
    objects2 = sep.extract(data, 1.5 * bkg.globalrms,
                           err=np.ones_like(data), filter_kernel=None)

    names_to_remove = ['errx2', 'erry2', 'errxy']
    names_to_keep = [i for i in objects.dtype.names
                     if i not in names_to_remove]
    objects = objects[names_to_keep]
    objects2 = objects2[names_to_keep]
    assert_equal(objects, objects2)

    # Less trivial test where thresh is realistic. Still a flat noise map.
    noise = bkg.globalrms * np.ones_like(data)
    objects2 = sep.extract(data, 1.5, err=noise, filter_kernel=None)
    objects2 = objects2[names_to_keep]
    assert_equal(objects, objects2)
def find_sources_with_sep(img):
    """Return sources (x, y) sorted by brightness. Use SEP package."""
    import sep

    if isinstance(img, np.ma.MaskedArray):
        image = img.filled(fill_value=np.median(img)).astype('float32')
    else:
        image = img.astype('float32')

    bkg = sep.Background(image)
    thresh = 3. * bkg.globalrms
    buff_message = 'internal pixel buffer full'
    try:
        sources = sep.extract(image - bkg.back(), thresh)
    except Exception as e:
        if str(e).startswith(buff_message):
            sep.set_extract_pixstack(600000)
            try:
                sources = sep.extract(image - bkg.back(), thresh)
            except Exception as e:
                if str(e).startswith(buff_message):
                    sep.set_extract_pixstack(900000)
                    sources = sep.extract(image - bkg.back(), thresh)

    sources.sort(order='flux')
    return np.array([[asrc['x'], asrc['y']] for asrc in sources[::-1]])
def img_acc_cnn_locations(threshold, flux, img, labelimg, full_img,
                          labelmap, locationmap):
    """Evaluates the accuracy of the CNN and returns the positions of the
    detections."""
    objectsimg, segmap_img = sep.extract(img, threshold,
                                         segmentation_map=True,
                                         filter_kernel=None)
    objectslabel, segmap_label = sep.extract(labelimg, 1,
                                             segmentation_map=True,
                                             filter_kernel=None)
    xcpeak = objectsimg['xcpeak']
    ycpeak = objectsimg['ycpeak']
    peak = objectslabel['peak']
    label_xcpeak = objectslabel['xpeak']
    label_ycpeak = objectslabel['ypeak']
    image_peak = objectsimg['peak']

    pos_spurs = []
    pos_undetected = []
    pos_correct = []
    ad = 0  # detected label objects above the flux cut
    bd = 0  # detected label objects below the flux cut
    ua = 0  # undetected label objects above the flux cut
    ub = 0  # undetected label objects below the flux cut
    ro = 0  # detections matching a real (labelled) object
    so = 0  # spurious detections

    for i in range(len(objectslabel)):
        xsep = label_xcpeak[i]
        ysep = label_ycpeak[i]
        pk = peak[i]
        if 4 < ysep < 145 and 4 < xsep < 145:
            if np.amax(segmap_img[ysep-2:ysep+3, xsep-2:xsep+3]) > 0.1:
                pos_correct.append(locationmap[ysep-3:ysep+4, xsep-3:xsep+4])
                if pk > flux:
                    ad += 1
                else:
                    bd += 1
            else:
                if pk > flux:
                    ua += 1
                    pos_undetected.append(
                        locationmap[ysep-3:ysep+4, xsep-3:xsep+4])
                else:
                    ub += 1

    for i in range(len(objectsimg)):
        xsep = xcpeak[i]
        ysep = ycpeak[i]
        ipk = image_peak[i]
        if 4 < ysep < 145 and 4 < xsep < 145:
            if np.amax(labelmap[ysep-2:ysep+3, xsep-2:xsep+3]) > 0.1:
                ro += 1
            else:
                so += 1
                pos_spurs.append(locationmap[ysep-3:ysep+4, xsep-3:xsep+4])

    return (ad, bd, ua, ub, ro, so, pos_spurs, pos_correct, pos_undetected)
def convertXML(self, newXml, dataPath, image1=None, image2=None):
    """Convert an old XML to a new coordinate system by using the 'phi
    homed' images, assuming the cobra module is in the horizontal setup.
    One can use the path for generating phi motor maps.
    """
    idx = self.cal.goodIdx
    idx1 = idx[idx <= self.cal.camSplit]
    idx2 = idx[idx > self.cal.camSplit]
    oldPos = self.cal.calibModel.centers
    newPos = np.zeros(57, dtype=complex)

    # read data and measure new positions
    if image1 is None:
        src1 = fits.getdata(dataPath + '/phi1Begin0.fits.gz')
    else:
        src1 = fits.getdata(dataPath + '/' + image1)
    if image2 is None:
        src2 = fits.getdata(dataPath + '/phi2Begin0.fits.gz')
    else:
        src2 = fits.getdata(dataPath + '/' + image2)
    data1 = sep.extract(src1.astype(float), 200)
    data2 = sep.extract(src2.astype(float), 200)
    home1 = np.array(sorted([(c['x'], c['y']) for c in data1],
                            key=lambda t: t[0], reverse=True))
    home2 = np.array(sorted([(c['x'], c['y']) for c in data2],
                            key=lambda t: t[0], reverse=True))
    newPos[idx1] = home1[:len(idx1), 0] + home1[:len(idx1), 1] * 1j
    newPos[idx2] = home2[-len(idx2):, 0] + home2[-len(idx2):, 1] * 1j

    # calculate the transformation
    offset1, scale1, tilt1, convert1 = calculation.transform(
        oldPos[idx1], newPos[idx1])
    offset2, scale2, tilt2, convert2 = calculation.transform(
        oldPos[idx2], newPos[idx2])

    split = self.cal.camSplit + 1
    old = self.cal.calibModel
    new = deepcopy(self.cal.calibModel)
    new.centers[:split] = convert1(old.centers[:split])
    new.tht0[:split] = (old.tht0[:split] + tilt1) % (2 * np.pi)
    new.tht1[:split] = (old.tht1[:split] + tilt1) % (2 * np.pi)
    new.L1[:split] = old.L1[:split] * scale1
    new.L2[:split] = old.L2[:split] * scale1
    new.centers[split:] = convert2(old.centers[split:])
    new.tht0[split:] = (old.tht0[split:] + tilt2) % (2 * np.pi)
    new.tht1[split:] = (old.tht1[split:] + tilt2) % (2 * np.pi)
    new.L1[split:] = old.L1[split:] * scale2
    new.L2[split:] = old.L2[split:] * scale2

    # create a new XML file
    old.updateGeometry(new.centers, new.L1, new.L2)
    old.updateThetaHardStops(new.tht0, new.tht1)
    old.createCalibrationFile(dataPath + '/' + newXml)
def img_accuracy_mfil_2(threshold, img, labelimg, full_img, flux, labelmap):
    """Evaluates the accuracy of the matched filter in a single image."""
    imgvar = np.var(img)
    objectsimg, segmap_img = sep.extract(img, threshold, var=imgvar,
                                         segmentation_map=True,
                                         filter_kernel=None)
    objectslabel, segmap_label = sep.extract(labelimg, 1,
                                             segmentation_map=True,
                                             filter_kernel=None)
    xcpeak = objectsimg['xcpeak']
    ycpeak = objectsimg['ycpeak']
    peak = objectslabel['peak']
    label_xcpeak = objectslabel['xpeak']
    label_ycpeak = objectslabel['ypeak']
    image_peak = objectsimg['peak']

    above_detections = 0
    below_detections = 0
    undetected_above = 0
    undetected_below = 0
    real_objects = 0
    spur_objects = 0

    for i in range(len(objectslabel)):
        xsep = label_xcpeak[i]
        ysep = label_ycpeak[i]
        pk = peak[i]
        if 4 < ysep < 145 and 4 < xsep < 145:
            if np.amax(segmap_img[ysep-2:ysep+3, xsep-2:xsep+3]) > 0.1:
                if pk > flux:
                    above_detections += 1
                else:
                    below_detections += 1
            else:
                if pk > flux:
                    undetected_above += 1
                else:
                    undetected_below += 1

    for i in range(len(objectsimg)):
        xsep = xcpeak[i]
        ysep = ycpeak[i]
        ipk = image_peak[i]
        if 4 < ysep < 145 and 4 < xsep < 145:
            if np.amax(labelmap[ysep-2:ysep+3, xsep-2:xsep+3]) > 0.1:
                real_objects += 1
                maxarray = np.amax(labelimg[ysep-2:ysep+3, xsep-2:xsep+3])
                maxarray2 = np.amax(full_img[ysep-2:ysep+3, xsep-2:xsep+3])
                mfilter_detections_true_list.append(
                    [ipk / np.sqrt(imgvar), maxarray2, maxarray])
            else:
                spur_objects += 1
                maxarray = np.amax(full_img[ysep-2:ysep+3, xsep-2:xsep+3])
                mfilter_detections_spur_list.append(
                    [ipk / np.sqrt(imgvar), maxarray])

    return (above_detections, below_detections, undetected_above,
            undetected_below, real_objects, spur_objects)
def sextractor(im, err=None, mask=None, nsig=5.0, gain=1.0):
    # Check byte order, SEP needs little endian
    if im.dtype.byteorder == '>':
        data = im.byteswap().newbyteorder()
    else:
        data = im

    # Background estimation and subtraction
    bkg = sep.Background(data, mask, bw=256, bh=256, fw=3, fh=3)
    bkg_image = bkg.back()
    data_sub = data - bkg

    # Detect and extract objects
    if err is None:
        objects = sep.extract(data_sub, nsig, err=bkg.globalrms, mask=mask)
    else:
        objects = sep.extract(data_sub, nsig, err=err, mask=mask)

    # Get mag_auto in 2 steps
    kronrad, krflag = sep.kron_radius(data_sub, objects['x'], objects['y'],
                                      objects['a'], objects['b'],
                                      objects['theta'], 6.0, mask=mask)
    flux, fluxerr, flag = sep.sum_ellipse(data_sub, objects['x'],
                                          objects['y'], objects['a'],
                                          objects['b'], objects['theta'],
                                          2.5 * kronrad, subpix=1, err=err,
                                          mask=mask, gain=gain)
    flag |= krflag  # combine flags into 'flag'

    # Use circular aperture if Kron radius is too small
    r_min = 1.75  # minimum diameter = 3.5
    use_circle = kronrad * np.sqrt(objects['a'] * objects['b']) < r_min
    if np.sum(use_circle) > 0:
        cflux, cfluxerr, cflag = sep.sum_circle(data_sub,
                                                objects['x'][use_circle],
                                                objects['y'][use_circle],
                                                r_min, subpix=1, err=err,
                                                mask=mask, gain=gain)
        flux[use_circle] = cflux
        fluxerr[use_circle] = cfluxerr
        flag[use_circle] = cflag
    mag_auto = -2.5 * np.log10(flux) + 25.0
    magerr_auto = 1.0857 * fluxerr / flux

    # Make the final catalog
    newdt = np.dtype([('kronrad', float), ('flux_auto', float),
                      ('fluxerr_auto', float), ('mag_auto', float),
                      ('magerr_auto', float)])
    cat = dln.addcatcols(objects, newdt)
    cat['flag'] |= flag
    cat['kronrad'] = kronrad
    cat['flux_auto'] = flux
    cat['fluxerr_auto'] = fluxerr
    cat['mag_auto'] = mag_auto
    cat['magerr_auto'] = magerr_auto

    return cat
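# A minimal usage sketch for sextractor() above. The FITS filename and the
# printed columns are illustrative assumptions, not part of the original
# code; only numpy/astropy/sep and the function defined above are assumed.
import numpy as np
from astropy.io import fits

def demo_sextractor():
    im = fits.getdata('example_image.fits').astype(np.float64)  # hypothetical file
    cat = sextractor(im, nsig=5.0, gain=1.0)
    # cat is a numpy structured array; 'mag_auto' etc. were added above
    print(cat['x'][:5], cat['y'][:5], cat['mag_auto'][:5])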
def _mask(self, hdu, sci_ext):
    im = hdu[sci_ext].data + 0
    try:
        mask = hdu['mask'].data.astype(bool)
        mask[im < DATA_FLOOR] = True
    except KeyError:
        opts = dict(bw=64, bh=64, fw=3, fh=3)
        mask = np.zeros(im.shape, bool)
        for i in range(2):
            mask[im < DATA_FLOOR] = True
            bkg = sep.Background(im, mask=mask, **opts)
            objects, mask = sep.extract(im - bkg, 2, err=bkg.globalrms,
                                        segmentation_map=True)
            mask = mask != 0

    sources = mask.copy()
    mask[im < DATA_FLOOR] = True

    # unmask objects near target
    lbl, n = nd.label(mask)
    try:
        cen = hdu['sci'].header['tgty'], hdu['sci'].header['tgtx']
        for m in np.unique(lbl[cen[0]-2:cen[0]+3, cen[1]-2:cen[1]+3]):
            mask[lbl == m] = False
    except KeyError:
        pass

    # add NaNs to the mask
    mask |= ~np.isfinite(im)

    return sources, mask
def run_sep_extractor(self):
    # A memory error in the call to sep.extract becomes less likely as the
    # pixel stack grows, so raise it generously.
    sep.set_extract_pixstack(10000000)

    data = self.image_data_formatted

    # generate a background map of data
    bkg = sep.Background(data)

    # subtract the background map from data
    data_sub = data - bkg
    self.data_sub_bkg = data_sub

    threshold = 100  # 2 * np.std(data_sub) + np.min(data_sub)
    star_objects = sep.extract(data_sub, threshold, minarea=9,
                               mask=self.image_mask, gain=3,
                               deblend_nthresh=32, deblend_cont=0.0005)

    # keep only well-behaved detections
    pix_thresh = 100
    good_objects = star_objects[star_objects['flag'] < 8]
    good_objects = good_objects[good_objects['npix'] < pix_thresh]

    return good_objects
def globalscalenoise(cube, outcube, memmap=False):
    """
    Read a cube and compute a global scaling factor for the variance such
    that the variance in the data is consistent with the variance extension.
    The scaling factor is calculated in regions of the spectrum free from
    skylines.
    """
    # Read cube
    hdu = fits.open(cube, memmap=memmap)
    data = hdu[1].data
    std = np.sqrt(hdu[2].data)
    nz, ny, nx = np.shape(data)
    wave = hdu[1].header['CRVAL3'] + np.arange(nz) * hdu[1].header['CD3_3']

    # compress into image
    image = np.nanmedian(data, axis=0)
    ny, nx = image.shape

    # mask edges
    edges = np.isfinite(image)
    badmask = np.zeros((ny, nx)) + 1
    badmask[edges] = 0.0
    badmask = ndimage.gaussian_filter(badmask, 1.5)
    badmask[np.where(badmask > 0)] = 1.0

    # mask sources
    bkg = sep.Background(image, mask=badmask)
    thresh = 1.5 * bkg.globalrms
    objects, segmap = sep.extract(image, thresh, segmentation_map=True,
                                  minarea=10, clean=True, mask=badmask)
    badmask[np.where(segmap > 0)] = 1.0
    tonan = (badmask > 0)
    badmask[tonan] = np.nan
    badmask[np.logical_not(tonan)] = 1
    mask3d = np.broadcast_to(badmask, (nz,) + badmask.shape)

    fsig = data / std * mask3d
    fsig_1d = np.nanstd(fsig, axis=(1, 2))
    okwave = ((wave > 4700) & (wave < 5800)) | ((wave > 6600) & (wave < 6800))

    # Average does not like NaNs, mask them out
    global_offset = np.nanmedian(fsig_1d[okwave])

    hdu[2].data *= global_offset**2
    hdu[2].header['VARSCALE'] = global_offset**2
    hdu.writeto(outcube, overwrite=True)
def sep_phot(data, ap, th):
    """Performs photometry with SEP, similar to SExtractor."""
    # Measure a spatially variable background of some image data (np array)
    try:
        bkg = sep.Background(data)  # optional: mask=mask, bw=64, bh=64, fw=3, fh=3
    except ValueError:
        data = data.byteswap(True).newbyteorder()
        bkg = sep.Background(data)

    # Directly subtract the background from the data in place
    bkg.subfrom(data)

    # for the background-subtracted data, detect objects given some threshold
    thresh = th * bkg.globalrms  # ensure the threshold is high enough wrt background
    objs = sep.extract(data, thresh)

    # calculate the Kron radius for each object, then perform elliptical
    # aperture photometry within that radius
    kronrad, krflag = sep.kron_radius(data, objs['x'], objs['y'], objs['a'],
                                      objs['b'], objs['theta'], ap)
    flux, fluxerr, flag = sep.sum_ellipse(data, objs['x'], objs['y'],
                                          objs['a'], objs['b'],
                                          objs['theta'], 2.5 * kronrad,
                                          subpix=1)
    flag |= krflag  # combine flags into 'flag'

    # fall back to a circular aperture where the Kron radius is too small
    r_min = 1.75  # minimum diameter = 3.5
    use_circle = kronrad * np.sqrt(objs['a'] * objs['b']) < r_min
    x = objs['x']
    y = objs['y']
    cflux, cfluxerr, cflag = sep.sum_circle(data, x[use_circle],
                                            y[use_circle], r_min, subpix=1)
    flux[use_circle] = cflux
    fluxerr[use_circle] = cfluxerr
    flag[use_circle] = cflag

    return objs
def run_sep_on_sub(bad_pix_dir, set_num):
    path = '/Users/mcurrie/GitRepos/step_1/%s/set_%s_epochs/' % (
        bad_pix_dir, str(set_num))
    try:
        data_fl = path + 'F125W_epoch02_drz.fits'
        ref_fl = path + 'F125W_epoch01_drz.fits'
        data = pyfits.open(data_fl)
        ref = pyfits.open(ref_fl)
    except IOError:
        data_fl = path + 'F110W_epoch02_drz.fits'
        ref_fl = path + 'F110W_epoch01_drz.fits'
        data = pyfits.open(data_fl)
        ref = pyfits.open(ref_fl)

    sub = data[1].data - ref[1].data
    mask = (data[3].data > 0) & (ref[3].data > 0)
    mask = ~mask
    data.close()
    ref.close()

    extract = sep.extract(sub, mask=mask, thresh=0.035)

    with open('/Users/mcurrie/GitRepos/step_1/%s/object_coords_table.txt'
              % bad_pix_dir, 'w') as fl:
        fl.write('x\ty\n')
        for obj in extract:
            x = int(obj[7])
            y = int(obj[8])
            if np.isnan(np.sum(sub[y - 20:y + 21, x - 20:x + 21])):
                print('found a nan')
                continue
            else:
                fl.write('%i\t%i\n' % (x, y))
def aperture_photometry(image_data: np.ndarray,
                        background: sep.Background) -> List[Star]:
    """
    performs aperture photometry on an image
    :param image_data: data of the image
    :param background: background of the image
    :return: a list of extracted stars
    """
    # extract objects using source extractor
    objects = sep.extract(image_data, AperturePhotometry.threshold,
                          err=background.globalrms)

    # perform aperture photometry
    aperture = sep.sum_circle(
        image_data,
        objects["x"],
        objects["y"],
        AperturePhotometry.radius,
        err=background.globalrms,
        gain=AperturePhotometry.gain,
    )

    # create and return a list of star objects
    return [
        Star(x, y, flux, fluxerr)
        for x, y, flux, fluxerr, _ in zip(
            objects["x"], objects["y"], *aperture)
    ]
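# A minimal usage sketch for aperture_photometry() above. The original
# module presumably defines the AperturePhotometry config class and the
# Star type; the stand-in class and values here are illustrative
# assumptions so the sketch runs on its own.
import numpy as np
import sep

class AperturePhotometry:  # hypothetical stand-in for the module's config
    threshold = 3.0  # detection threshold in units of background RMS
    radius = 5.0     # aperture radius in pixels
    gain = 1.0       # detector gain

def demo_aperture_photometry(image_data: np.ndarray):
    background = sep.Background(image_data)
    # pass background-subtracted data, as sep.extract expects
    return aperture_photometry(image_data - background.back(), background)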
def genSegmap(cutoutName):
    '''Create segmentation image using the sep SExtractor module.'''
    cutoutData = fits.getdata(cutoutName).astype(float)
    # filter kernel
    filter_kernel = np.loadtxt(
        f'{realsim_dir}/Sources/utils/sdss-cfg/gauss_3.0_7x7.conv',
        skiprows=2)
    # use std of full image as detection threshold guess
    guess_rms = np.std(cutoutData)
    # mask all sources above the std for background statistics
    mask = ((cutoutData - np.median(cutoutData)) > guess_rms)
    # https://github.com/kbarbary/sep/issues/33: convert to float
    # bkg object which includes back() and rms() methods
    bkg = sep.Background(cutoutData, mask=mask, bw=32, bh=32, fw=3, fh=3)
    # run sep.extract() on the background-subtracted image
    objCat, segmap = sep.extract(cutoutData - bkg.back(), thresh=1.0,
                                 err=bkg.rms(), mask=None, minarea=5,
                                 filter_kernel=filter_kernel,
                                 filter_type='conv', deblend_nthresh=32,
                                 deblend_cont=0.001, clean=True,
                                 clean_param=1.0, segmentation_map=True)
    return segmap
def extract(residual_s, thresh=None):
    """
    Uses sep to find sources on a residual image(s)

    Arguments:
    residual_s -- image of residuals from hotpants or a list of images

    Returns:
    A list of Source objects representing the location and various
    metrics of detected variable sources
    """
    residuals = residual_s if isinstance(residual_s, list) else [residual_s]

    sources = []
    for r in residuals:
        r_np = to_np(r)
        bkg = sep.Background(r_np)
        if thresh is None:
            # from astroalign's settings
            threshold = bkg.globalrms * 3.0
        else:
            threshold = thresh
        sources.append(
            sep.extract(r_np - bkg.back(), threshold,
                        segmentation_map=True))
    return sources if isinstance(residual_s, list) else sources[0]
def run_source_extractor(N, draw_figures=False):
    gal_params_file = os.path.join('params', 'gal_sim_params.txt')
    real_params = np.loadtxt(gal_params_file)

    position_data = np.zeros((POS_NUM_GAL * N, 2))
    num_found_data = np.zeros(N)

    for i in range(N):
        fits_file_name = os.path.join('blends', 'single_blend%d.fits' % i)
        data = fits.getdata(fits_file_name)
        data = data.byteswap(inplace=True).newbyteorder()
        bkg = sep.Background(data)

        # subtract background noise
        data_sub = data - bkg
        objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)

        if draw_figures:
            draw_figure(data_sub, objects)

        for j in range(len(objects)):
            position_data[2 * i + j][0] = objects['x'][j]
            position_data[2 * i + j][1] = objects['y'][j]
        num_found_data[i] = len(objects)

    if not draw_figures:
        pos_file_name = os.path.join('params', 'sep_positions.txt')
        num_file_name = os.path.join('params', 'sep_num_found.txt')
        np.savetxt(pos_file_name, np.asarray(position_data))
        np.savetxt(num_file_name, num_found_data)
        print('updated training data')
def run(self, image):
    # SEP needs native byte order; byteswap once and extract from the
    # swapped array
    data = image.data.byteswap().newbyteorder()
    sep_data = extract(data, self.threshold * np.median(data))
    coordinates = np.array([sep_data["x"], sep_data["y"]]).T
    fluxes = np.array(sep_data["flux"])
    image.stars_coords, image.peaks = self.clean(fluxes, coordinates)
def detect_sources(data, threshold, elipse=False, *args, **kwargs):
    '''
    Detect the sources of the image.

    Parameters:
        data : ~numpy.ndarray~
            2D image data.
        threshold : float or ~numpy.ndarray~
            The detection threshold.
        elipse : bool
            If True, also return the ellipse parameters for each object.

    **kwargs are passed directly to the sep functions.

    Returns:
        x, y : ~numpy.ndarray~
            The positions of the detected sources.
        a, b, theta : ~numpy.ndarray~
            The parameters of the detected ellipses (only if elipse=True).
    '''
    data = _fix_data(data)
    objs = sep.extract(data, threshold, **kwargs)

    if elipse:
        return objs['x'], objs['y'], objs['a'], objs['b'], objs['theta']
    else:
        return objs['x'], objs['y']
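# A minimal usage sketch for detect_sources() above on a synthetic frame;
# the injected source and 3-sigma threshold are illustrative assumptions,
# and _fix_data is whatever helper the original module provides.
import numpy as np
import sep

def demo_detect_sources():
    rng = np.random.default_rng(0)
    data = rng.normal(0.0, 1.0, (128, 128)).astype(np.float32)
    data[60:65, 60:65] += 50.0  # inject a fake source
    bkg = sep.Background(data)
    x, y = detect_sources(data - bkg.back(), 3.0 * bkg.globalrms)
    return x, y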
def extractSourcesFromRCD4(filename, biasimage, bkg, bkg_rms):
    hnumpix = 2048
    vnumpix = 2048
    gain = 'low'

    try:
        fid = open(filename, 'rb')

        # fid.seek(0,0)
        # magicnum = readxbytes(fid,4)  # 4 bytes ('Meta')
        # Check the magic number. If it doesn't match, exit function.

        fid.seek(152, 0)
        timestamp = readxbytes(fid, 29)
        print(timestamp)

        # Load data portion of file
        fid.seek(246, 0)
        # fid.seek(384,0)

        table = np.fromfile(fid, dtype=np.uint8, count=12582912)
        testimages = nb_read_data(table)
        image = split_images(testimages, hnumpix, vnumpix, gain)
        image = image.astype('int32')
        image = image.copy(order='C')
        fid.close()

        image = subtractBias(image, biasimage)
        # m, s = np.mean(image), np.std(image)
        # bkg = sep.Background(image)
        data_sub = image - bkg
        objects = sep.extract(data_sub, 2.5, err=bkg_rms)
        return objects
    except Exception:
        print(filename)
        print('Error with filename')
def __sepFindFWHM(self, tries):
    from astropy.io import fits
    import math
    import traceback

    focpos = []
    fwhm = []
    fwhm_min = None
    fwhm_MinimumX = None
    keys = list(tries.keys())
    keys.sort()
    ln2 = math.log(2)
    for k in keys:
        try:
            fwhms = []
            ff = fits.open(tries[k])
            # loop on images..
            for i in range(1, len(ff)):
                data = ff[i].data
                bkg = sep.Background(numpy.array(data, float))
                sources = sep.extract(data - bkg, 5.0 * bkg.globalrms)
                for s in sources:
                    # s['a'] and s['b'] are the ellipse semi-axes
                    fwhms.append(2 * math.sqrt(ln2 * (s['a']**2 + s['b']**2)))
            im_fwhm = numpy.median(fwhms)  # median of fwhm measurements
            self.log('I', 'offset {0} fwhm {1} with {2} stars'.format(
                k, im_fwhm, len(fwhms)))
            focpos.append(k)
            fwhm.append(im_fwhm)
            if fwhm_min is None or im_fwhm < fwhm_min:
                fwhm_MinimumX = k
                fwhm_min = im_fwhm
        except Exception as ex:
            self.log('W', 'offset {0}: {1} {2}'.format(
                k, ex, traceback.format_exc()))
    return focpos, fwhm, fwhm_min, fwhm_MinimumX
def extractSourcesFromRCD(filename, bias, hnumpix, vnumpix, gain):
    try:
        fid = open(filename, 'rb')

        # fid.seek(0,0)
        # magicnum = readxbytes(fid,4)  # 4 bytes ('Meta')
        # Check the magic number. If it doesn't match, exit function.

        fid.seek(152, 0)
        timestamp = readxbytes(fid, 29)
        print(timestamp)

        # Load data portion of file
        fid.seek(246, 0)
        # fid.seek(384,0)

        table = np.fromfile(fid, dtype=np.uint8, count=12582912)
        testimages = nb_read_data(table)
        image = split_images(testimages, hnumpix, vnumpix, gain)
        image = image.astype('int32')
        image = image.copy(order='C')
        fid.close()

        image = subtractBias(image, bias)
        # m, s = np.mean(image), np.std(image)

        # Can we just run the background on the first image from every set?
        # And bkg.globalrms can be calculated once...
        bkg = sep.Background(image)
        data_sub = image - bkg
        objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)
        return objects
    except Exception:
        print('')
        print('Error with filename')
def get_segmap(self, targetmag=None, on="fakeimage", thresh=0.1,
               deblend_cont=1e-4, source_kwargs={}, update=True, **kwargs):
    """ """
    import sep
    if targetmag is not None:
        self.build_pointsource_image(targetmag, **source_kwargs)

    sepo, segmap = sep.extract(getattr(self, on), thresh,
                               segmentation_map=True,
                               deblend_cont=deblend_cont, **kwargs)
    sepout = pandas.DataFrame(sepo)

    # Some measurements
    deltax, deltay = (self.coords_refpixel - sepout[['x', 'y']].values).T
    sepout["dist_to_target"] = np.sqrt(deltax**2 + deltay**2)

    if update:
        self._sepout = sepout
        self._segmap = segmap

    return segmap, sepout
def compare_image(the_image):
    """Return the fraction of sources found in the reference image"""
    # pixel comparison is not good, doesn't work. Compare catalogs.
    if isinstance(the_image, np.ma.MaskedArray):
        full_algn = the_image.filled(fill_value=np.median(the_image))\
            .astype('float32')
    else:
        full_algn = the_image.astype('float32')
    # full_algn[the_image == 0] = np.median(the_image)
    import sep
    bkg = sep.Background(full_algn)
    thresh = 3.0 * bkg.globalrms
    allobjs = sep.extract(full_algn - bkg.back(), thresh)
    allxy = np.array([[obj['x'], obj['y']] for obj in allobjs])

    from scipy.spatial import KDTree
    ref_coordtree = KDTree(self.star_ref_pos)

    # Compare here srcs list with self.star_ref_pos
    num_sources = 0
    for asrc in allxy:
        found_source = ref_coordtree.query_ball_point(asrc, 3)
        if found_source:
            num_sources += 1
    fraction_found = float(num_sources) / float(len(allxy))
    return fraction_found
def extract_sources(self, thresh=2, err=None, mask=None, data="dataclean",
                    setradec=True, setmag=True, update=True, **kwargs):
    """uses sep.extract to extract sources 'a la SExtractor'"""
    from sep import extract

    if err is None:
        err = self.get_noise()
    elif err in ["None"]:
        err = None

    if mask is None:
        mask = self.get_mask()
    elif mask in ["None"]:
        mask = None

    sout = extract(getattr(self, data).byteswap().newbyteorder(),
                   thresh, err=err, mask=mask, **kwargs)
    _sources = pandas.DataFrame(sout)

    if setradec:
        ra, dec = self.pixels_to_coords(*_sources[["x", "y"]].values.T)
        _sources["ra"] = ra
        _sources["dec"] = dec
    if setmag:
        _sources["mag"] = self.counts_to_mag(_sources["flux"], None)[0]
        # Errors to be added

    if not update:
        return _sources
    self.set_catalog(_sources, "sources")
def sourceExtractImage(data, bkgArr=None, sortType='centre', silence=False,
                       **kwargs):
    """Extract sources from data array and return enumerated objects sorted
    smallest to largest, and the segmentation map provided by source
    extractor.
    """
    if not silence:
        log.info('Performing object detection using SourceExtractor')
    if bkgArr is None:
        bkgArr = np.zeros(data.shape)
    o = sep.extract(data.copy(), kwargs.pop('threshold', 0.05),
                    segmentation_map=True, **kwargs)
    if sortType == 'size':
        if not silence:
            log.info('Sorting extracted objects by radius from size')
        sizeSortedObjects = sorted(
            enumerate(o[0]), key=lambda src: src[1]['npix'])
        return sizeSortedObjects, o[1]
    elif sortType == 'centre':
        if not silence:
            log.info('Sorting extracted objects by radius from centre')
        centreSortedObjects = sorted(
            enumerate(o[0]),
            key=lambda src: (
                (src[1]['x'] - data.shape[0] / 2)**2
                + (src[1]['y'] - data.shape[1] / 2)**2
            )
        )[::-1]
        return centreSortedObjects, o[1]
def genSegmap(cutoutName):
    '''Create segmentation image using the sep SExtractor module.'''
    cutoutData = fits.getdata(cutoutName)
    # filter kernel
    filter_kernel = np.loadtxt(
        '{}Sources/utils/CFIS-cfg/gauss_3.0_7x7.conv'.format(RSDIR),
        skiprows=2)
    # use std of full image as detection threshold guess
    guess_rms = np.std(cutoutData)
    # mask all sources above the std for background statistics
    mask = (cutoutData > guess_rms)
    # https://github.com/kbarbary/sep/issues/23
    cutoutData_sw = cutoutData.byteswap(True).newbyteorder()
    # bkg object which includes back() and rms() methods
    bkg = sep.Background(cutoutData_sw, mask=mask, bw=32, bh=32, fw=3, fh=3)
    # run sep.extract() on image
    objCat, segmap = sep.extract(cutoutData_sw, thresh=1.0, err=bkg.rms(),
                                 mask=None, minarea=5,
                                 filter_kernel=filter_kernel,
                                 filter_type='conv', deblend_nthresh=32,
                                 deblend_cont=0.001, clean=True,
                                 clean_param=1.0, segmentation_map=True)
    return segmap
def sep_extract(data, threshold=3):
    '''
    Extract sources from an image using SEP.

    Parameters
    ==========
    data : 2d ndarray
        Image containing the sources
    threshold : float
        The threshold value for detection, in number of sigma.

    Returns
    =======
    sources : np.recarray
        A list of sources, as returned by sep.extract, and ordered by flux.
        See the documentation of sep.extract for a description of the
        fields.
    '''
    if isinstance(data, np.ma.MaskedArray):
        image = data.filled(fill_value=np.median(data)).astype(np.float32)
    else:
        image = data.astype(np.float32)
    bkg = sep.Background(image)
    thresh = threshold * bkg.globalrms
    sources = sep.extract(image - bkg.back(), thresh)
    sources.sort(order='flux')
    # sources = sources.view(np.recarray)
    return sources
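# A minimal usage sketch for sep_extract() above on a synthetic frame;
# the injected Gaussian blob and the seed are illustrative assumptions.
import numpy as np

def demo_sep_extract():
    rng = np.random.default_rng(42)
    image = rng.normal(100.0, 5.0, (256, 256))
    yy, xx = np.mgrid[0:256, 0:256]
    image += 200.0 * np.exp(-((xx - 128)**2 + (yy - 128)**2) / (2 * 3.0**2))
    sources = sep_extract(image, threshold=5)
    # after the ascending flux sort, the brightest source is last
    print(sources['x'][-1], sources['y'][-1], sources['flux'][-1])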
def compare_image(the_image):
    """Return the fraction of sources found in the original image"""
    # pixel comparison is not good, doesn't work. Compare catalogs.
    if isinstance(the_image, np.ma.MaskedArray):
        full_algn = the_image.filled(fill_value=np.median(the_image))\
            .astype('float32')
    else:
        full_algn = the_image.astype('float32')
    full_algn[the_image == 0] = np.median(the_image)
    import sep
    bkg = sep.Background(full_algn)
    thresh = 3.0 * bkg.globalrms
    allobjs = sep.extract(full_algn - bkg.back(), thresh)
    allxy = np.array([[obj['x'], obj['y']] for obj in allobjs])

    from scipy.spatial import KDTree
    ref_coordtree = KDTree(self.star_new_pos)

    # Compare here srcs list with self.star_ref_pos
    num_sources = 0
    for asrc in allxy:
        found_source = ref_coordtree.query_ball_point(asrc, 3)
        if found_source:
            num_sources += 1
    fraction_found = float(num_sources) / float(len(allxy))
    return fraction_found
def find_limited(self, data, threshold=1.5, table=True, only_best=True,
                 max_sources=200):
    try:
        bkg = Background(data)
        data_sub = data - bkg
        all_objects = asarray(
            extract(data_sub, threshold, err=bkg.globalrms))
        if only_best:
            all_objects = all_objects[all_objects['flag'] == 0]
        ord_objects = nsort(all_objects, order=['flux'])
        # brightest first; truncate to max_sources when it is set
        if max_sources:
            objects = ord_objects[::-1][:max_sources]
        else:
            objects = ord_objects[::-1]
        if table:
            return TBL(objects)
        return objects
    except Exception as e:
        self.etc.log(e)
def sourceExtractImage(data, bkgArr=None, sortType='centre', verbose=False,
                       **kwargs):
    """Extract sources from data array and return enumerated objects sorted
    smallest to largest, and the segmentation map provided by source
    extractor.
    """
    data = np.array(data).byteswap().newbyteorder()
    if bkgArr is None:
        bkgArr = np.zeros(data.shape)
    o = sep.extract(data, kwargs.pop('threshold', 0.05),
                    segmentation_map=True, **kwargs)
    if sortType == 'size':
        if verbose:
            print('Sorting extracted objects by radius from size')
        sizeSortedObjects = sorted(enumerate(o[0]),
                                   key=lambda src: src[1]['npix'])
        return sizeSortedObjects, o[1]
    elif sortType == 'centre':
        if verbose:
            print('Sorting extracted objects by radius from centre')
        centreSortedObjects = sorted(
            enumerate(o[0]),
            key=lambda src: ((src[1]['x'] - data.shape[0] / 2)**2
                             + (src[1]['y'] - data.shape[1] / 2)**2))[::-1]
        return centreSortedObjects, o[1]
def identify_objects(image_data, nsigma, min_area, deb_n_thresh, deb_cont,
                     param_dict):
    '''
    This function performs source identification and generates a
    segmentation map, which is then used for masking the sources.

    :param image_data: the image data, an m x n numpy ndarray,
        e.g., fits.getdata('image_file_name')
    :param nsigma: source detection significance.
    :param min_area: minimum area to be considered as a source.
    :param deb_n_thresh: number of threshold values for the deblending
        routine, e.g., 32, 64 etc.
    :param deb_cont: deblend minimum contrast ratio (see the Source
        Extractor or SEP documentation).
    :param param_dict: a dictionary containing
        'sep_filter_kwarg' = filter keyword, one of 'tophat', 'gauss', or
        'boxcar';
        'sep_filter_size' = the 'size' of the filter. For a Gaussian, it is
        the FWHM of the Gaussian. For a tophat, it is the radius of the
        tophat filter. For a boxcar, it is the side length of the 2D box.
    :return: objects: a numpy array of the objects, ordered as per their
        segmentation values in the segmap.
        segmap: a segmentation map, where each source is marked with a
        unique source identification number.
    '''
    # Note, this whole routine uses a Python-based source identification
    # module named SEP (Barbary et al., 2016).

    # Unpack the filter keyword and its size from the parameter dictionary.
    filter_kwarg = param_dict['sep_filter_kwarg']
    filter_size = float(param_dict['sep_filter_size'])

    # Byte-swap the data, as suggested on the SEP webpage.
    byte_swaped_data = image_data.byteswap().newbyteorder()

    # SEP estimates a global background.
    global_bkg = sep.Background(byte_swaped_data)

    # background-subtracted data = original data - estimated global background
    bkg_subtracted = byte_swaped_data - global_bkg

    # Check the user's choice of filter and its size, and define a kernel
    # based on that choice.
    if filter_kwarg.lower() not in ['tophat', 'gauss', 'boxcar']:
        warnings.warn('The filter %s is not supported as of yet, defaulting '
                      'to tophat of radius 5' % filter_kwarg)
        source_kernel = Tophat2DKernel(5)
    elif filter_kwarg.lower() == 'tophat':
        source_kernel = Tophat2DKernel(filter_size)
    elif filter_kwarg.lower() == 'gauss':
        # gaussian_fwhm_to_sigma is a conversion constant, so multiply
        _gauss_sigma = filter_size * gaussian_fwhm_to_sigma
        source_kernel = Gaussian2DKernel(_gauss_sigma)
    elif filter_kwarg.lower() == 'boxcar':
        source_kernel = Box2DKernel(filter_size)

    # Object detection and segmentation map generation.
    objects, segmap = sep.extract(bkg_subtracted, nsigma,
                                  err=global_bkg.globalrms,
                                  minarea=min_area,
                                  deblend_nthresh=deb_n_thresh,
                                  deblend_cont=deb_cont,
                                  segmentation_map=True,
                                  filter_kernel=source_kernel.array)

    return objects, segmap
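# A minimal usage sketch for identify_objects() above; the parameter
# values and the synthetic big-endian image are illustrative assumptions
# (big-endian on purpose, since the function byteswaps its input).
import numpy as np

def demo_identify_objects():
    rng = np.random.default_rng(1)
    image = rng.normal(0.0, 1.0, (200, 200)).astype('>f4')
    image[100:104, 100:104] += 25.0  # inject a fake source
    params = {'sep_filter_kwarg': 'gauss', 'sep_filter_size': 3.0}
    objects, segmap = identify_objects(image, nsigma=2.0, min_area=5,
                                       deb_n_thresh=32, deb_cont=0.005,
                                       param_dict=params)
    return objects, segmap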
def svm_clf(path):
    data_set = []
    data1 = fits.open(path)
    data = data1[0].data
    data = data[132:991, 132:991]

    # Detect the background of the image and obtain the image after
    # removing the background
    data = data.astype(np.float64)
    bkg = sep.Background(data, mask=None, bw=64, bh=64, fw=3, fh=3)
    data_sub = data - bkg  # background-subtracted data
    objects = sep.extract(data_sub, 2.5, err=bkg.globalrms,
                          deblend_nthresh=1)

    # Get the number of bright stars
    number = 0
    for i in range(len(objects)):
        a = objects[i][15]
        b = objects[i][16]
        a, b = max(a, b), min(a, b)
        # Control star size
        if a < 32 and b > 2.5:
            number += 1

    m1, s1 = np.mean(data_sub), np.std(data_sub)
    data_sub = data_sub.astype(np.uint16)

    # Obtain gray-level co-occurrence matrix parameters
    gray = color.rgb2gray(data_sub)
    image = img_as_ubyte(gray)
    bins = np.array([0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176,
                     192, 208, 224, 240, 255])  # 16 bins
    inds = np.digitize(image, bins)
    max_value = inds.max() + 1
    matrix_coocurrence = greycomatrix(inds, [1],
                                      [0, np.pi / 4, np.pi / 2,
                                       3 * np.pi / 4],
                                      levels=max_value, normed=False,
                                      symmetric=False)
    cons = np.sum(contrast_feature(matrix_coocurrence)) / 4
    diss = np.sum(dissimilarity_feature(matrix_coocurrence)) / 4
    homo = np.sum(homogeneity_feature(matrix_coocurrence)) / 4
    asmm = np.sum(asm_feature(matrix_coocurrence)) / 4
    ener = np.sum(energy_feature(matrix_coocurrence)) / 4
    corr = np.sum(correlation_feature(matrix_coocurrence)) / 4

    # Entropy calculation
    shan = shannon_entropy(image)

    data_set = [[m1, number, corr, s1, homo, shan, asmm, ener, cons]]

    # Load the saved model for prediction
    clf = joblib.load('./rf_clf.m')
    a = clf.predict(data_set)
    a = int(a[0])
    print(a)
    cnn_set = [path, m1, number, corr, s1, homo, shan, asmm, ener, cons,
               diss, a]
    return cnn_set
def detect_high_sb_objects(img, sig, threshold=30.0, min_area=100, mask=None,
                           deb_thr_hsig=128, deb_cont_hsig=0.0001,
                           mu_limit=23.0, sig_hsig_1=0.1, sig_hsig_2=4.0,
                           verbose=False):
    """Detect all bright objects and mask them out."""
    # Step 1: Detect bright objects on the image
    '''
    From Greco et al. 2018:

    Next, we find very bright sources by flagging all pixels that are at
    least 28σ above the global background level for each patch; for a
    typical patch, this corresponds to the brightest ~2% of all pixels.
    The background and its variance are estimated using several iterations
    of sigma clipping.

    In this work, we choose to detect two groups of bright objects:
    1: > 20 sigma, size > 200
    2: > 15 sigma, size > 10000
    '''
    # Object detection: high threshold, relatively small minimum size
    obj_hsig, seg_hsig = sep.extract(img, threshold, err=sig,
                                     minarea=min_area, mask=mask,
                                     deblend_nthresh=deb_thr_hsig,
                                     deblend_cont=deb_cont_hsig,
                                     segmentation_map=True)

    # Remove objects with low peak surface brightness
    idx_low_peak_mu = []
    obj_hsig = Table(obj_hsig)
    for idx, obj in enumerate(obj_hsig):
        if obj_peak_mu(obj) >= mu_limit:
            seg_hsig[seg_hsig == (idx + 1)] = 0
            idx_low_peak_mu.append(idx)
    obj_hsig.remove_rows(idx_low_peak_mu)

    if verbose:
        print("# Keep %d high surface brightness objects" % len(obj_hsig))

    # Generate masks
    msk_hsig = seg_to_mask(seg_hsig, sigma=sig_hsig_1, msk_max=1000.0,
                           msk_thr=0.01)
    msk_hsig_large = seg_to_mask(seg_hsig, sigma=sig_hsig_2, msk_max=1000.0,
                                 msk_thr=0.005)

    return obj_hsig, msk_hsig, msk_hsig_large
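# A minimal usage sketch for detect_high_sb_objects() above. The constant
# RMS noise map is an illustrative assumption; seg_to_mask and obj_peak_mu
# are helpers from the original module and are assumed to be importable.
import numpy as np

def demo_detect_high_sb(img):
    sig = np.std(img) * np.ones_like(img)  # crude constant noise map
    objs, msk, msk_large = detect_high_sb_objects(
        img, sig, threshold=30.0, min_area=100, verbose=True)
    return objs, msk, msk_large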
def find_sources(imgfile, nsig=1.5, minarea=10., clean=True,
                 deblend_cont=0.0001, regfile=None, write=None, bkgsub=True):
    """
    Find sources in the whitelight image using SExtractor.

    Args:
        imgfile (str): An image fits file
        nsig (float, optional): Detection threshold in units of sky
            background rms.
        minarea (float, optional): minimum area in pixels to be considered
            a valid detection.
        clean (bool, optional): Perform cleaning?
        deblend_cont (float, optional): Minimum contrast ratio used for
            object deblending. Default is 0.0001. To entirely disable
            deblending, set to 1.0.
        regfile (str, optional): A ds9 region file of areas to be masked
            out.
        write (str, optional): write extracted object table to this path.
        bkgsub (bool, optional): perform background subtraction? Default
            is set to true.

    Returns:
        objects (Table): Summary table of detected objects.
        segmap (ndarray): Segmentation map.
    """
    # Get whitelight image
    hdulist = fits.open(imgfile)
    white = hdulist[0]
    data = white.data
    data = data.byteswap().newbyteorder()  # sep requires this

    # Make a mask if available
    if regfile:
        reg = pyreg.open(regfile).as_imagecoord(white.header)
        mask = reg.get_filter().mask(data)
    else:
        mask = None

    # Characterize sky
    bkg = sep.Background(data, mask=mask)

    # Subtract background?
    if bkgsub:
        bkg.subfrom(data)
        # Compute background again
        bkg = sep.Background(data, mask=mask)

    # Compute source detection threshold
    thresh = nsig * bkg.globalrms

    # Extract sources
    objects, segmap = sep.extract(data, thresh=thresh, mask=mask,
                                  deblend_cont=deblend_cont,
                                  minarea=minarea, clean=clean,
                                  segmentation_map=True)
    if write:
        Table(objects).write(write, overwrite=True)

    return Table(objects), segmap
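# A minimal usage sketch for find_sources() above; the filenames are
# illustrative assumptions, not part of the original code.
def demo_find_sources():
    objects, segmap = find_sources('whitelight.fits', nsig=2.0, minarea=10,
                                   write='detections.fits')
    print('found {} sources'.format(len(objects)))
    return objects, segmap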
def extract_sources(data):
    data = data.byteswap(True).newbyteorder()
    bkg = sep.Background(data)
    back = bkg.back()
    data_woback = data - back
    thresh = 1.5 * bkg.globalrms
    objects = sep.extract(data_woback, thresh)
    return objects
def _find_sources(img, sigma_thresh=3., bkg_rms=None):
    "Return sources (x, y) sorted by brightness."
    import sep
    if isinstance(img, _np.ma.MaskedArray):
        image = img.filled(fill_value=_np.median(img)).astype('float32')
    else:
        image = img.astype('float32')

    if bkg_rms is None:
        bkg = sep.Background(image)
        thresh = sigma_thresh * bkg.globalrms
        sources = sep.extract(image - bkg.back(), thresh)
    else:
        thresh = sigma_thresh * bkg_rms
        sources = sep.extract(image, thresh)
    sources.sort(order='flux')
    return _np.array([[asrc['x'], asrc['y']] for asrc in sources[::-1]])
def run(self, step_name, mask=None):
    try:
        img = self.image.copy()
    except AttributeError:
        logger.error('You must setup image before running pipeline!')
        exit(1)

    logger.info('running ' + step_name)
    kws = self.step_kws[step_name].copy()
    sep_extract_kws = kws.pop('sep_extract_kws', {})
    sep_extract_kws = check_kwargs_defaults(sep_extract_kws,
                                            self.SEP_EXTRACT_DEFAULTS)
    sep_back_kws = kws.pop('sep_back_kws', {})
    sep_back_kws = check_kwargs_defaults(sep_back_kws,
                                         self.SEP_BACK_DEFAULTS)

    if mask is not None:
        logger.info('applying mask')
        sep_extract_kws['mask'] = mask
        sep_back_kws['mask'] = mask

    # define gaussian kernel for detection
    # TODO: make filter shape optional
    num_fwhm = sep_extract_kws.pop('filter_num_fwhm', 1)
    kernel_fwhm = self.psf_fwhm * num_fwhm
    logger.info('smoothing with kernel with fwhm = {:.2f} arcsec'.
                format(kernel_fwhm * utils.pixscale))
    kern = Gaussian2DKernel(kernel_fwhm * gaussian_fwhm_to_sigma,
                            mode='oversample')
    kern.normalize()
    sep_extract_kws['filter_kernel'] = kern.array

    # estimate background and subtract from image
    bkg = sep.Background(img, **sep_back_kws)
    img_sub = img - bkg

    # extract sources
    logger.info('detecting with a threshold of {} x background'.
                format(sep_extract_kws['thresh']))
    sources, segmap = sep.extract(img_sub, err=bkg.rms(),
                                  segmentation_map=True, **sep_extract_kws)
    sources = Table(sources)
    sources = sources[sources['flux'] > 0]
    logger.info('found {} sources'.format(len(sources)))

    if kws['do_measure']:
        sources = self._measure(img, sources, mask)

    sources['seg_label'] = np.arange(1, len(sources) + 1)
    self.sources[step_name] = sources

    return sources, segmap
def createMask(data, thresh=100, title="Mask", plotMask=True):
    extraction = sep.extract(data, thresh, segmentation_map=True)
    mask = extraction[1]
    if plotMask:
        plt.figure()
        plt.imshow(mask, aspect="auto", interpolation="nearest",
                   origin="lower")
        plt.title(title)
        plt.show()
    return mask
def test_long_error_msg():
    """Ensure that the error message is created successfully when
    there is an error detail."""

    # set extract pixstack to an insanely small value; this will trigger
    # a detailed error message when running sep.extract()
    old = sep.get_extract_pixstack()
    sep.set_extract_pixstack(5)

    data = np.ones((10, 10), dtype=np.float64)
    with pytest.raises(Exception) as excinfo:
        sep.extract(data, 0.1)
    msg = excinfo.value.args[0]
    assert type(msg) == str  # check that message is the native string type
    assert msg.startswith("internal pixel buffer full: The limit")

    # restore
    sep.set_extract_pixstack(old)
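# A small sketch of the save/grow/restore pattern around the pixel-stack
# limit that the test above exercises; the 3x growth factor and the retry
# count are illustrative choices, not from the original code.
import sep

def extract_with_retry(data, thresh, max_tries=3, **kwargs):
    old = sep.get_extract_pixstack()
    try:
        for attempt in range(max_tries):
            try:
                return sep.extract(data, thresh, **kwargs)
            except Exception as e:
                if 'internal pixel buffer full' not in str(e):
                    raise
                # grow the pixel stack and try again
                sep.set_extract_pixstack(sep.get_extract_pixstack() * 3)
        return sep.extract(data, thresh, **kwargs)
    finally:
        sep.set_extract_pixstack(old)  # always restore the global limit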
def auto_fit(image_data):
    an_width = 3
    internal_image_data = image_data.byteswap(True).newbyteorder()
    bkg = sep.Background(internal_image_data)
    thresh = 1.5 * bkg.globalback
    objects = sep.extract(internal_image_data, thresh)

    # find the detection closest to the image centre and size an aperture
    # from its bounding box
    center_x = internal_image_data.shape[0] / 2.0
    center_y = internal_image_data.shape[1] / 2.0
    center = [center_x, center_y]
    smallest_dFoc = 1000000000
    radii = 21
    count = 0
    for i, j in zip(objects['x'], objects['y']):
        pos = [i, j]
        dFoc = math.sqrt(((pos[0] - center[0])**2) +
                         ((pos[1] - center[1])**2))
        if abs(dFoc) < smallest_dFoc:
            smallest_dFoc = dFoc
            minx = objects['xmin'][count]
            miny = objects['ymin'][count]
            maxx = objects['xmax'][count]
            maxy = objects['ymax'][count]
            radii = abs(math.sqrt(((maxx - minx)**2) + ((maxy - miny)**2)))
        count += 1

    # scan annuli of width an_width and keep the one with the smallest
    # background-normalized flux metric
    i = 0
    while i < 25:
        theta = 0
        area = 0
        while theta <= 2 * math.pi:
            area += (((i + .1)**2) / 2) - ((i**2) / 2)
            theta += .001
        flux, fluxerr, flag = sep.sum_circann(
            internal_image_data,
            internal_image_data.shape[0] / 2,
            internal_image_data.shape[1] / 2,
            i, i + an_width)
        metric = (flux - bkg.globalback) / bkg.globalrms
        metric /= area
        if i > 1:
            if metric < smallest_metric:
                smallest_metric = metric
                annulus = [i, i + an_width]
        else:
            smallest_metric = metric
            annulus = [i, i + an_width]
        i += 1

    an = [math.ceil(x * 6) for x in annulus]
    if radii > an[0]:
        radii = an[0] - 0.5
    if radii > 30:
        radii = 30

    return {'Ap': radii, 'InAn': an[0], 'OutAn': an[1]}
def extract_sources(data):
    try:
        bkg = sep.Background(data)
    except ValueError:
        data = data.byteswap().newbyteorder()
        bkg = sep.Background(data)
    back = bkg.back()
    data_woback = data - back
    thresh = 3.0 * bkg.globalrms
    objects = sep.extract(data_woback, thresh)
    return objects
def find_stars_on_data(data, verbose=0, useDS9=False):
    bkg = sep.Background(data)
    bkg.subfrom(data)
    thres = 1.5 * bkg.globalrms
    if verbose > 1:
        print('global average background: {0:.2f} rms: {1:.3f} '
              'threshold: {2:.3f}'.format(bkg.globalback, bkg.globalrms,
                                          thres))
    objects = sep.extract(data, thres)
    # order by flux, descending
    if len(objects) == 0:
        return []
    return sorted(objects, key=lambda o: o['flux'], reverse=True)
def find_objects(obs, segmentation_map=False):
    import sep
    noise = np.sqrt(1.0 / obs.weight[0, 0])
    return sep.extract(
        obs.image,
        SEP_THRESH,
        segmentation_map=segmentation_map,
        err=noise,
        **SEP_PARS
    )
def ImageObjDetection(self, filter):
    print('SEP object detection on %s......' % filter)
    sigma = 3.0
    threshold = sigma * self.bkgRMS[filter]
    objs = sep.extract(self.dataList[filter], threshold, minarea=20)

    # add fields for RA and DEC
    desc = np.dtype([('RA', 'float64'), ('DEC', 'float64')])
    newObjs = np.zeros(objs.shape, dtype=objs.dtype.descr + desc.descr)
    for name in objs.dtype.names:
        newObjs[name] = objs[name]
    # add RA and DEC to objs
    newObjs['RA'], newObjs['DEC'] = self.images[filter].xy_to_rd(objs['x'],
                                                                 objs['y'])
    return newObjs
def test_extract_segmentation_map():
    # Get some background-subtracted test data:
    data = np.copy(image_data)
    bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
    bkg.subfrom(data)

    objects, segmap = sep.extract(data, 1.5 * bkg.globalrms,
                                  segmentation_map=True)
    assert type(segmap) is np.ndarray
    assert segmap.shape == data.shape
    for i in range(len(objects)):
        assert objects["npix"][i] == (segmap == i + 1).sum()
def find_centroid(data, t):
    '''
    filter_kernel = makeGaussian(17, 5., 0, np.array([8.5, 8.5]))
    source = extract(data, t, filter_kernel=filter_kernel)
    '''
    source = extract(data, t)
    a = source['a']
    b = source['b']
    flux = source['cflux']
    try:
        arg = np.argsort(flux)[-1]
    except IndexError:
        return None
    x = source['x'][arg]
    y = source['y'][arg]
    try:
        fwhm = np.sqrt(a[arg] * b[arg])
        print('fwhm:{0}'.format(fwhm))
    except ValueError:
        return None

    size = data.shape[0]
    zero = size / 2 + .5
    kernel = makeGaussian(17, fwhm, 0, np.array([8.5, 8.5]))
    img = signal.convolve2d(data, kernel, mode="same")
    max_value = np.max(img)
    xi, yi = np.unravel_index(np.argmax(img), img.shape)
    if (xi >= 1 and xi < img.shape[0] - 1 and
            yi >= 1 and yi < img.shape[1] - 1):
        ox, oy = fit_3x3(img[xi-1:xi+2, yi-1:yi+2])
    else:
        ox, oy = 0., 0.
    if (np.absolute(ox) > 3) or (np.absolute(oy) > 3):
        ox, oy = 0., 0.
    print(xi, yi, ox, oy)
    return xi + ox + .5, yi + oy + .5, max_value, flux[arg]
def test_extract_matched_filter_at_edge():
    """Exercise bug where a bright star at the edge of the image was not
    detected with a noise array and matched filter on."""
    data = np.zeros((20, 20))
    err = np.ones_like(data)
    kernel = np.array([[1., 2., 1.],
                       [2., 4., 2.],
                       [1., 2., 1.]])
    data[18:20, 9:12] = kernel[0:2, :]

    objects, pix = sep.extract(data, 2.0, err=err, filter_kernel=kernel,
                               filter_type="matched", segmentation_map=True)
    assert len(objects) == 1
    assert objects["npix"][0] == 6
def find_stars(fn, hdu, verbose=0, useDS9=False, cube=None):
    """Find stars on the image. Returns flux-ordered list of stars."""
    if cube is None:
        data = np.array(hdu[0].data, np.int32)
    else:
        data = np.array(hdu[0].data[cube], np.int32)
    bkg = sep.Background(data)
    bkg.subfrom(data)
    thres = 1.5 * bkg.globalrms
    if verbose > 1:
        print('global average background: {0:.2f} rms: {1:.3f} '
              'threshold: {2:.3f}'.format(bkg.globalback, bkg.globalrms,
                                          thres))
    objects = sep.extract(data, thres)
    # order by flux, descending
    if len(objects) == 0:
        return None
    return sorted(objects, key=lambda o: o['flux'], reverse=True)
def getobjects(data, thresh=5.0):
    if data.dtype != np.float32:
        data = np.array(data, dtype="float32")
    bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
    back = bkg.back()
    rms = bkg.rms()
    bkg.subfrom(data)
    thresh = thresh * bkg.globalrms
    objects = sep.extract(data, thresh)
    # print("there are", len(objects), "objects")
    return objects
def extract(data):
    bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
    bkg.subfrom(data)
    objs = sep.extract(data, 1.5 * bkg.globalrms)
    flux, fluxerr, flag = sep.sum_circle(data, objs['x'], objs['y'], 5.,
                                         err=bkg.globalrms)
    kr, flag = sep.kron_radius(data, objs['x'], objs['y'], objs['a'],
                               objs['b'], objs['theta'], 6.0)
    eflux, efluxerr, eflag = sep.sum_ellipse(data, objs['x'], objs['y'],
                                             objs['a'], objs['b'],
                                             objs['theta'], r=2.5 * kr,
                                             err=bkg.globalrms, subpix=1)
    retstr = ""
    for i in range(len(objs['x'])):
        retstr = retstr + (str(objs['x'][i]) + "\t" + str(objs['y'][i]) +
                           "\t" + str(flux[i]) + "\t" + str(fluxerr[i]) +
                           "\t" + str(kr[i]) + "\t" + str(eflux[i]) + "\t" +
                           str(efluxerr[i]) + "\t" + str(flag[i]) + "\n")
    return retstr
def test_extract_with_mask():
    # Get some background-subtracted test data:
    data = np.copy(image_data)
    bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
    bkg.subfrom(data)

    # mask half the image
    ylim = data.shape[0] // 2
    mask = np.zeros(data.shape, dtype=bool)
    mask[ylim:, :] = True

    objects = sep.extract(data, 1.5 * bkg.globalrms, mask=mask)

    # check that we found some objects and that they are all in the
    # unmasked region.
    assert len(objects) > 0
    assert np.all(objects['y'] < ylim)
def create_object_catalog(self, arr, threshold=3.0, border=0):
    if border > 0:
        wmap = numpy.ones_like(arr)
        wmap[border:-border, border:-border] = 0
    else:
        wmap = None

    bkg = sep.Background(arr)
    data_sub = arr - bkg
    objects, objmask = sep.extract(
        data_sub, threshold,
        err=bkg.globalrms * numpy.ones_like(data_sub),
        mask=wmap,
        segmentation_map=True
    )
    return objects, objmask
def segmentation_combined(data, snr_detect=10.0, fwhm=4.0, npixels=15,
                          mask_corners=False):
    import sep
    from astropy.convolution import Gaussian2DKernel
    from astropy.stats import gaussian_fwhm_to_sigma

    box_shape = [64, 64]
    _logger.info('point source detection')

    # Corners
    mask = numpy.zeros_like(data, dtype='int32')
    if mask_corners:
        # Remove corner regions
        _logger.info('using internal mask to remove corners')
        mask[2000:, 0:80] = 1
        mask[2028:, 2000:] = 1
        mask[:50, 1950:] = 1
        mask[:100, :50] = 1

    _logger.info('compute background map, %s', box_shape)
    bkg = sep.Background(data)

    _logger.info('reference fwhm is %5.1f pixels', fwhm)
    _logger.info('convolve with gaussian kernel, FWHM %3.1f pixels', fwhm)
    sigma = fwhm * gaussian_fwhm_to_sigma
    #
    kernel = Gaussian2DKernel(sigma)
    kernel.normalize()

    _logger.info('background level is %5.1f', bkg.globalback)
    _logger.info('background rms is %5.1f', bkg.globalrms)
    _logger.info('detect threshold, %3.1f sigma over background', snr_detect)
    thresh = snr_detect * bkg.globalrms

    data_s = data - bkg.back()
    try:
        objects, segmap = sep.extract(data_s, thresh, minarea=npixels,
                                      filter_kernel=kernel.array,
                                      segmentation_map=True, mask=mask)
        _logger.info('detected %d objects', len(objects))
    except Exception as error:
        _logger.warning("%s", error)
        segmap = numpy.zeros_like(data_s, dtype='int')
    return segmap
def go_test(cat, image_data, thresh):
    # bkg is assumed to come from the enclosing scope
    threshold = thresh * bkg.globalrms
    # make the extraction
    sources = sep.extract(image_data, threshold)
    cat.to_pandas()
    sources = pd.DataFrame(sources)

    # crossmatch the cats
    S = np.array([cat['x'], cat['y']]).T
    O = np.array([sources['x'], sources['y']]).T
    # right
    distr, indr = cx.crossmatch(S, O, max_distance=0.3)
    matchsr = ~np.isinf(distr)
    # left
    distl, indl = cx.crossmatch(O, S, max_distance=0.3)
    matchsl = ~np.isinf(distl)

    objID = np.zeros_like(O[:, 0]) - 1
    CSTARID = np.zeros_like(O[:, 0]) - 1
    CSTARMAG = np.zeros_like(O[:, 0]) - 1
    for i in range(len(O)):
        if distl[i] != np.inf:
            dist_o = distl[i]
            ind_o = indl[i]
            # now ind is a star number; let's see if that star has matched
            # the same object
            if distr[ind_o] != np.inf:
                dist_s = distr[ind_o]
                ind_s = indr[ind_o]
                if ind_s == i:
                    objID[i] = ind_o
                    CSTARID[i] = cat['cstarid'][ind_o]
                    CSTARMAG[i] = cat['imag'][ind_o]

    sources['objID'] = objID
    sources['cstarid'] = CSTARID
    sources['threshold'] = np.repeat(thresh, len(sources))
    sources['cstarmag'] = CSTARMAG
    # sources['was_a_hit'] = objID > 0.

    # report the hits as detections
    n_hits = sum(objID > 0.)
    print('Number of hits ==> {}'.format(n_hits))
    # report the false detections
    n_false = sum(objID < 0.)
    print('Number of falses ==> {}'.format(n_false))
    return sources
def find_source(data, t):
    source = extract(data, t)
    a = source['a']
    b = source['b']
    x = source['x']
    y = source['y']
    flux = source['flux']
    print(x)
    print(y)
    print(flux)
    arg = np.argsort(flux)
    print(x[arg[-1]], y[arg[-1]], flux[arg[-1]])
    try:
        fwhm = np.sqrt(np.max(a) * np.max(b))
        print(fwhm)
    except ValueError:
        return None
    return x[arg[-1]], y[arg[-1]], flux[arg[-1]]
def test_extract_with_noise_convolution():
    """Test extraction when there is both noise and convolution.

    This will use the matched filter implementation, and will handle
    bad pixels and edge effects gracefully.
    """
    # Start with an empty image where we label the noise as 1 sigma
    # everywhere.
    image = np.zeros((20, 20))
    error = np.ones((20, 20))

    # Add some noise representing bad pixels. We do not want to detect
    # these.
    image[17, 3] = 100.
    error[17, 3] = 100.
    image[10, 0] = 100.
    error[10, 0] = 100.
    image[17, 17] = 100.
    error[17, 17] = 100.

    # Add some real point sources that we should find.
    image[3, 17] = 10.
    image[6, 6] = 2.0
    image[7, 6] = 1.0
    image[5, 6] = 1.0
    image[6, 5] = 1.0
    image[6, 7] = 1.0

    objects = sep.extract(image, 2.0, minarea=1, err=error)
    objects.sort(order=['x', 'y'])

    # Check that we recovered the two correct objects and not the others.
    assert len(objects) == 2
    assert_approx_equal(objects[0]['x'], 6.)
    assert_approx_equal(objects[0]['y'], 6.)
    assert_approx_equal(objects[1]['x'], 17.)
    assert_approx_equal(objects[1]['y'], 3.)
bkg.subfrom(data)  # subtract it
t1 = time.time()
print("subtract background: {0:6.2f} ms".format((t1 - t0) * 1.e3))

t0 = time.time()
backarr = bkg.back(dtype=np.float64)  # background
t1 = time.time()
print("background array: {0:6.2f} ms".format((t1 - t0) * 1.e3))

t0 = time.time()
rmsarr = bkg.rms()
t1 = time.time()
print("rms array: {0:6.2f} ms".format((t1 - t0) * 1.e3))

t0 = time.time()
objects = sep.extract(data, 1.5 * bkg.globalrms)
t1 = time.time()
print("extract: {0:6.2f} ms [{1:d} objects]"
      .format((t1 - t0) * 1.e3, len(objects)))

# ---------------------------------------------------------------------------
# Background subtraction

print("")
if HAVE_PHOTUTILS:
    print("sep version:      ", sep.__version__)
    print("photutils version:", photutils.__version__)
    print("""
| test                    | sep             | photutils       | ratio  |
|-------------------------|-----------------|-----------------|--------|""")
    blankline = \
def do_stage(self, images):
    for i, image in enumerate(images):
        try:
            # Set the number of source pixels to be 5% of the total. This keeps us safe from
            # satellites and airplanes.
            sep.set_extract_pixstack(int(image.nx * image.ny * 0.05))

            data = image.data.copy()
            error = (np.abs(data) + image.readnoise ** 2.0) ** 0.5
            mask = image.bpm > 0

            # FITS data can be in backwards byte order, so fix that if need be and subtract
            # the background
            try:
                bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
            except ValueError:
                data = data.byteswap(True).newbyteorder()
                bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
            bkg.subfrom(data)

            # Do an initial source detection
            # TODO: Add back in masking after we are sure SEP works
            sources = sep.extract(data, self.threshold, minarea=self.min_area,
                                  err=error, deblend_cont=0.005)

            # Convert the detections into a table
            sources = Table(sources)

            # Calculate the ellipticity
            sources['ellipticity'] = 1.0 - (sources['b'] / sources['a'])

            # Fix any values of theta that are invalid due to floating point rounding:
            # -pi / 2 < theta < pi / 2
            sources['theta'][sources['theta'] > (np.pi / 2.0)] -= np.pi
            sources['theta'][sources['theta'] < (-np.pi / 2.0)] += np.pi

            # Calculate the Kron radius
            kronrad, krflag = sep.kron_radius(data, sources['x'], sources['y'],
                                              sources['a'], sources['b'],
                                              sources['theta'], 6.0)
            sources['flag'] |= krflag
            sources['kronrad'] = kronrad

            # Calculate the equivalent of flux_auto
            flux, fluxerr, flag = sep.sum_ellipse(data, sources['x'], sources['y'],
                                                  sources['a'], sources['b'],
                                                  np.pi / 2.0, 2.5 * kronrad,
                                                  subpix=1, err=error)
            sources['flux'] = flux
            sources['fluxerr'] = fluxerr
            sources['flag'] |= flag

            # Calculate the FWHMs of the stars
            fwhm = 2.0 * (np.log(2) * (sources['a'] ** 2.0 + sources['b'] ** 2.0)) ** 0.5
            sources['fwhm'] = fwhm

            # Cut individual bright pixels. Often cosmic rays.
            sources = sources[fwhm > 1.0]

            # Measure the flux profile
            flux_radii, flag = sep.flux_radius(data, sources['x'], sources['y'],
                                               6.0 * sources['a'], [0.25, 0.5, 0.75],
                                               normflux=sources['flux'], subpix=5)
            sources['flag'] |= flag
            sources['fluxrad25'] = flux_radii[:, 0]
            sources['fluxrad50'] = flux_radii[:, 1]
            sources['fluxrad75'] = flux_radii[:, 2]

            # Calculate the windowed positions
            sig = 2.0 / 2.35 * sources['fluxrad50']
            xwin, ywin, flag = sep.winpos(data, sources['x'], sources['y'], sig)
            sources['flag'] |= flag
            sources['xwin'] = xwin
            sources['ywin'] = ywin

            # Calculate the average background at each source
            bkgflux, fluxerr, flag = sep.sum_ellipse(bkg.back(), sources['x'], sources['y'],
                                                     sources['a'], sources['b'], np.pi / 2.0,
                                                     2.5 * sources['kronrad'], subpix=1)
            # masksum, fluxerr, flag = sep.sum_ellipse(mask, sources['x'], sources['y'],
            #                                          sources['a'], sources['b'], np.pi / 2.0,
            #                                          2.5 * kronrad, subpix=1)
            background_area = (2.5 * sources['kronrad']) ** 2.0 * sources['a'] * sources['b'] * np.pi  # - masksum
            sources['background'] = bkgflux
            sources['background'][background_area > 0] /= background_area[background_area > 0]

            # Update the catalog to match the FITS convention instead of the python array convention
            sources['x'] += 1.0
            sources['y'] += 1.0
            sources['xpeak'] += 1
            sources['ypeak'] += 1
            sources['xwin'] += 1.0
            sources['ywin'] += 1.0
            sources['theta'] = np.degrees(sources['theta'])

            image.catalog = sources['x', 'y', 'xwin', 'ywin', 'xpeak', 'ypeak',
                                    'flux', 'fluxerr', 'background', 'fwhm',
                                    'a', 'b', 'theta', 'kronrad', 'ellipticity',
                                    'fluxrad25', 'fluxrad50', 'fluxrad75',
                                    'x2', 'y2', 'xy', 'flag']

            # Add the units and descriptions to the catalog
            image.catalog['x'].unit = 'pixel'
            image.catalog['x'].description = 'X coordinate of the object'
            image.catalog['y'].unit = 'pixel'
            image.catalog['y'].description = 'Y coordinate of the object'
            image.catalog['xwin'].unit = 'pixel'
            image.catalog['xwin'].description = 'Windowed X coordinate of the object'
            image.catalog['ywin'].unit = 'pixel'
            image.catalog['ywin'].description = 'Windowed Y coordinate of the object'
            image.catalog['xpeak'].unit = 'pixel'
            image.catalog['xpeak'].description = 'X coordinate of the peak'
            image.catalog['ypeak'].unit = 'pixel'
            image.catalog['ypeak'].description = 'Y coordinate of the peak'
            image.catalog['flux'].unit = 'counts'
            image.catalog['flux'].description = 'Flux within a Kron-like elliptical aperture'
            image.catalog['fluxerr'].unit = 'counts'
            image.catalog['fluxerr'].description = 'Error on the flux within a Kron-like elliptical aperture'
            image.catalog['background'].unit = 'counts'
            image.catalog['background'].description = 'Average background value in the aperture'
            image.catalog['fwhm'].unit = 'pixel'
            image.catalog['fwhm'].description = 'FWHM of the object'
            image.catalog['a'].unit = 'pixel'
            image.catalog['a'].description = 'Semi-major axis of the object'
            image.catalog['b'].unit = 'pixel'
            image.catalog['b'].description = 'Semi-minor axis of the object'
            image.catalog['theta'].unit = 'degrees'
            image.catalog['theta'].description = 'Position angle of the object'
            image.catalog['kronrad'].unit = 'pixel'
            image.catalog['kronrad'].description = 'Kron radius used for extraction'
            image.catalog['ellipticity'].description = 'Ellipticity'
            image.catalog['fluxrad25'].unit = 'pixel'
            image.catalog['fluxrad25'].description = 'Radius containing 25% of the flux'
            image.catalog['fluxrad50'].unit = 'pixel'
            image.catalog['fluxrad50'].description = 'Radius containing 50% of the flux'
            image.catalog['fluxrad75'].unit = 'pixel'
            image.catalog['fluxrad75'].description = 'Radius containing 75% of the flux'
            image.catalog['x2'].unit = 'pixel^2'
            image.catalog['x2'].description = 'Variance on X coordinate of the object'
            image.catalog['y2'].unit = 'pixel^2'
            image.catalog['y2'].description = 'Variance on Y coordinate of the object'
            image.catalog['xy'].unit = 'pixel^2'
            image.catalog['xy'].description = 'XY covariance of the object'
            image.catalog['flag'].description = 'Bit mask combination of extraction and photometry flags'

            image.catalog.sort('flux')
            image.catalog.reverse()

            logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
            logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))

            # Save some background statistics in the header
            mean_background = stats.sigma_clipped_mean(bkg.back(), 5.0)
            image.header['L1MEAN'] = (mean_background,
                                      '[counts] Sigma clipped mean of frame background')
            logs.add_tag(logging_tags, 'L1MEAN', float(mean_background))

            median_background = np.median(bkg.back())
            image.header['L1MEDIAN'] = (median_background,
                                        '[counts] Median of frame background')
            logs.add_tag(logging_tags, 'L1MEDIAN', float(median_background))

            std_background = stats.robust_standard_deviation(bkg.back())
            image.header['L1SIGMA'] = (std_background,
                                       '[counts] Robust std dev of frame background')
            logs.add_tag(logging_tags, 'L1SIGMA', float(std_background))

            # Save some image statistics to the header
            good_objects = image.catalog['flag'] == 0

            seeing = np.median(image.catalog['fwhm'][good_objects]) * image.pixel_scale
            image.header['L1FWHM'] = (seeing, '[arcsec] Frame FWHM in arcsec')
            logs.add_tag(logging_tags, 'L1FWHM', float(seeing))

            mean_ellipticity = stats.sigma_clipped_mean(sources['ellipticity'][good_objects], 3.0)
            image.header['L1ELLIP'] = (mean_ellipticity, 'Mean image ellipticity (1-B/A)')
            logs.add_tag(logging_tags, 'L1ELLIP', float(mean_ellipticity))

            mean_position_angle = stats.sigma_clipped_mean(sources['theta'][good_objects], 3.0)
            image.header['L1ELLIPA'] = (mean_position_angle, '[deg] PA of mean image ellipticity')
            logs.add_tag(logging_tags, 'L1ELLIPA', float(mean_position_angle))

            self.logger.info('Extracted sources', extra=logging_tags)
        except Exception as e:
            logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
            logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))
            self.logger.error(e, extra=logging_tags)
    return images
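# The Kron-style flux measurement in the stage above (kron_radius followed by
# sum_ellipse with a 2.5 * kronrad aperture, SExtractor's FLUX_AUTO) can be
# exercised in isolation. Below is a minimal, self-contained sketch on a
# synthetic image: the image size, noise level, and Gaussian source parameters
# are invented for illustration; only the SEP calls mirror the stage.
import numpy as np
import sep

# Build a synthetic image: unit Gaussian noise plus one bright Gaussian source.
rng = np.random.default_rng(42)
ny, nx = 128, 128
data = rng.normal(0.0, 1.0, (ny, nx)).astype(np.float32)
yy, xx = np.mgrid[0:ny, 0:nx]
data += 200.0 * np.exp(-((xx - 64) ** 2 + (yy - 64) ** 2) / (2 * 3.0 ** 2))

# Subtract the background and detect at 5 sigma.
bkg = sep.Background(data)
bkg.subfrom(data)
sources = sep.extract(data, 5.0, err=bkg.globalrms)

# Kron radius, then the flux inside a 2.5 * kronrad elliptical aperture.
kronrad, krflag = sep.kron_radius(data, sources['x'], sources['y'],
                                  sources['a'], sources['b'],
                                  sources['theta'], 6.0)
flux, fluxerr, flag = sep.sum_ellipse(data, sources['x'], sources['y'],
                                      sources['a'], sources['b'],
                                      sources['theta'], 2.5 * kronrad,
                                      subpix=1)
# Should be close to the injected total flux, 200 * 2 * pi * 3^2 ~ 1.1e4
# (FLUX_AUTO captures most, not all, of a Gaussian's flux).
print(flux)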
def internalskysub(listob, skymask, deepwhite=None):
    """
    Perform sky subtraction using pixels within the cube

    listob  -> OBs to loop on
    skymask -> if set to a ds9 region file (image coordinates), compute the sky
               in these regions (excluding sources); otherwise mask sources and
               use all the pixels in the field
    """
    import os
    import glob
    from astropy.io import fits
    import numpy as np
    import zap
    import sep

    # grab top dir
    topdir = os.getcwd()

    # now loop over each folder and make the final illumination-corrected cubes
    for ob in listob:
        # change dir
        os.chdir(ob + "/Proc/")
        print("Processing {} for sky subtraction correction".format(ob))

        # search how many exposures there are
        scils = glob.glob("OBJECT_RED_0*.fits*")
        nsci = len(scils)

        # loop on exposures and reduce each frame with zeroth-order sky subtraction + ZAP
        for exp in range(nsci):
            # do pass on IFUs
            print("Internal sky subtraction of exposure {}".format(exp + 1))

            # define names
            oldcube = "DATACUBE_FINAL_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(exp + 1)
            oldimage = "IMAGE_FOV_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(exp + 1)
            newcube = "DATACUBE_FINAL_LINEWCS_EXP{0:d}_lineskysub.fits".format(exp + 1)
            newimage = "IMAGE_FOV_LINEWCS_EXP{0:d}_lineskysub.fits".format(exp + 1)
            ifumask_iname = "IMAGE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp + 1)
            source_mask = "IMAGE_SOURCEMASK_LINEWCS_EXP{0:d}.fits".format(exp + 1)
            zapcube = "DATACUBE_FINAL_LINEWCS_EXP{0:d}_zapsky.fits".format(exp + 1)
            zapimage = "IMAGE_FOV_LINEWCS_EXP{0:d}_zapsky.fits".format(exp + 1)
            zapsvdout = "ZAPSVDOUT_EXP{0:d}.fits".format(exp + 1)

            if not os.path.isfile(zapcube):
                # open the cube and the IFU mask
                cube = fits.open(oldcube)
                ifumask = fits.open(ifumask_iname)

                # if a white image is provided, load it
                if deepwhite:
                    print("Use source mask image {}".format(deepwhite))
                    whsrc = fits.open(topdir + "/" + deepwhite)
                    whitesource = whsrc[0].data.byteswap().newbyteorder()
                else:
                    # otherwise create it from the cube
                    print("Create source mask image from cube")
                    whitesource = np.nanmedian(cube[1].data, axis=0)

                # now create a source mask
                print("Create a source mask")
                header = cube[1].header
                bkg = sep.Background(whitesource)
                bkg_subtracted_data = whitesource - bkg.back()
                thresh = 3.0 * bkg.globalrms
                minarea = 20
                clean = True
                segmap = np.zeros((header["NAXIS2"], header["NAXIS1"]))

                # extract objects
                objects, segmap = sep.extract(bkg_subtracted_data, thresh,
                                              segmentation_map=True,
                                              minarea=minarea, clean=clean)

                # define geometry
                nwave = cube[1].header["NAXIS3"]
                nx = cube[1].header["NAXIS1"]
                ny = cube[1].header["NAXIS2"]

                # make sure pixels are sky subtracted once and only once
                countsub = np.copy(ifumask[1].data) * 0.0

                # if a mask is set, do a coarse median sky subtraction
                if skymask:
                    print("Constructing sky mask")
                    # for zap, the sky region should be 0 and sources > 1
                    skybox = np.zeros((ny, nx)) + 1
                    # construct the sky region mask
                    from mypython.fits import pyregmask as pmk
                    mysky = pmk.PyMask(nx, ny, "../../" + skymask, header=cube[1].header)
                    for ii in range(mysky.nreg):
                        mysky.fillmask(ii)
                        usepix = np.where(mysky.mask > 0)
                        skybox[usepix] = 0

                    # now do the median sky subtraction, looping over wavelength
                    for ww in range(nwave):
                        # extract sky slice
                        skyimg = cube[1].data[ww, :, :]
                        # grab pixels with no source that are inside the mask region,
                        # avoiding edges not flagged by the IFU mask
                        pixels = np.where((skybox < 1) & (segmap < 1) & (ifumask[1].data > 0))
                        # compute the sky in good regions
                        medsky = np.nanmedian(skyimg[pixels])
                        # subtract from all pixels
                        cube[1].data[ww, :, :] = skyimg - medsky
                else:
                    # otherwise do a coarse sky subtraction IFU by IFU
                    for iff in range(24):
                        thisifu = (iff + 1) * 100.0
                        nextifu = (iff + 2) * 100.0 + 1
                        # grab pixels in this IFU without sources
                        pixels = np.where((ifumask[1].data >= thisifu) &
                                          (ifumask[1].data < nextifu) & (segmap < 1))
                        pixels_ifu = np.where((ifumask[1].data >= thisifu) &
                                              (ifumask[1].data < nextifu) & (countsub < 1))
                        # update used pixels
                        countsub[pixels_ifu] = 1

                        # loop over wavelength
                        for ww in range(nwave):
                            skyimg = cube[1].data[ww, :, :]
                            # compute the sky in good regions
                            medsky = np.nanmedian(skyimg[pixels])
                            # subtract from all IFU pixels
                            skyimg[pixels_ifu] = skyimg[pixels_ifu] - medsky
                            cube[1].data[ww, :, :] = skyimg

                # write the final cube
                cube.writeto(newcube, overwrite=True)

                # create white image
                print("Creating final white image")
                white_new = np.zeros((ny, nx))
                for xx in range(nx):
                    for yy in range(ny):
                        white_new[yy, xx] = np.nansum(cube[1].data[:, yy, xx]) / nwave

                # save projected image
                hdu1 = fits.PrimaryHDU([])
                hdu2 = fits.ImageHDU(white_new)
                hdu2.header = cube[1].header
                hdulist = fits.HDUList([hdu1, hdu2])
                hdulist.writeto(newimage, overwrite=True)

                # save the segmap; write it to both extensions to be sure ZAP
                # reads the right one
                hdu1 = fits.PrimaryHDU(segmap)
                hdu2 = fits.ImageHDU(segmap)
                hdulist = fits.HDUList([hdu1, hdu2])
                hdulist.writeto(source_mask, overwrite=True)

                print("Running ZAP on exposure {}".format(exp + 1))

                # deal with masks
                if skymask:
                    # combine the sky mask with the source mask; again write it
                    # to both extensions to be sure ZAP reads the right one
                    tmpzapmask = segmap + skybox
                    hdu1 = fits.PrimaryHDU(tmpzapmask)
                    hdu2 = fits.ImageHDU(tmpzapmask)
                    hdulist = fits.HDUList([hdu1, hdu2])
                    hdulist.writeto("ZAP_" + source_mask, overwrite=True)
                    zapmask = "ZAP_" + source_mask
                else:
                    zapmask = source_mask

                # clean old SVD output if it exists
                try:
                    os.remove(zapsvdout)
                except OSError:
                    pass

                # run zap
                zap.process(newcube, outcubefits=zapcube, clean=True,
                            svdoutputfits=zapsvdout, mask=zapmask)

                # create a white image from the zap cube
                cube = fits.open(zapcube)
                print("Creating final white image from ZAP")
                white_new = np.zeros((ny, nx))
                for xx in range(nx):
                    for yy in range(ny):
                        white_new[yy, xx] = np.nansum(cube[1].data[:, yy, xx]) / nwave

                # save projected image
                hdu1 = fits.PrimaryHDU([])
                hdu2 = fits.ImageHDU(white_new)
                hdu2.header = cube[1].header
                hdulist = fits.HDUList([hdu1, hdu2])
                hdulist.writeto(zapimage, overwrite=True)
            else:
                print("ZAP cube already exists for exposure {}... skip!".format(exp + 1))

        # back to top for next OB
        os.chdir(topdir)
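# The core of the masked branch above is a per-wavelength median of the
# source-free sky pixels, subtracted from the whole slice. Below is a minimal
# sketch of that operation on a plain numpy cube: the cube shape, the fake sky
# level, and the hand-drawn segmentation map are invented for illustration,
# standing in for the MUSE cube and the sep.extract segmap (0 = sky,
# > 0 = source).
import numpy as np

nwave, ny, nx = 10, 50, 50
rng = np.random.default_rng(0)
cube = rng.normal(5.0, 1.0, (nwave, ny, nx))  # fake sky level of ~5 counts

segmap = np.zeros((ny, nx))
segmap[20:30, 20:30] = 1  # pretend a source lives here

sky_pixels = segmap < 1
for ww in range(nwave):
    # median sky from the source-free pixels of this slice only,
    # so each wavelength gets its own zeroth-order sky estimate
    medsky = np.nanmedian(cube[ww][sky_pixels])
    cube[ww] -= medsky

print(np.nanmedian(cube))  # should now be close to zero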