def prep_image(data):
    # Subtract the robust level of the last 43 rows (overscan estimate) from
    # each amplifier half, then orient the frame.
    image = np.hstack([data[:, :1024] - biweight(data[-43:, :1024]),
                       data[:, 1124:] - biweight(data[-43:, 1124:])])
    image = np.rot90(image)
    image = np.fliplr(image)
    return image
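# A minimal usage sketch for prep_image (illustrative, not part of the
# pipeline): the slicing above expects two 1024-column data regions separated
# by a 100-column gap, with the last 43 rows used for the overscan level. The
# frame shape below is invented to match those slices.
import numpy as np

_rng = np.random.default_rng(0)
_frame = _rng.normal(1000., 3., size=(1075, 2148))
_clean = prep_image(_frame)
# hstack gives (1075, 2048); rot90 + fliplr leave (2048, 1075)
print(_clean.shape)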
def prep_image(data):
    # Overscan-subtract using the serial overscan columns, then pad the top
    # of the frame with blank rows (args and addrows are defined at module
    # scope).
    if args.binned:
        image = data[:, :1024] - biweight(data[:, 1026:])
    else:
        image = data[:, :2048] - biweight(data[:, 2052:])
    new_image = np.zeros((len(image) + addrows, image.shape[1]))
    new_image[addrows:, :] = image
    return new_image
def subtract_sky(spectra, good):
    nfibs, N = spectra.shape
    # Collapse the middle third of the wavelength range for each fiber
    n1 = int(1. / 3. * N)
    n2 = int(2. / 3. * N)
    y = biweight(spectra[:, n1:n2], axis=1)
    # Fibers whose collapsed flux is flagged as a continuum detection are
    # excluded from the sky estimate
    mask, cont = identify_sky_pixels(y[good], size=15)
    m1 = ~good
    m1[good] = mask
    skyfibers = ~m1
    init_sky = biweight(spectra[skyfibers], axis=0)
    return spectra - init_sky[np.newaxis, :]
def get_continuum(spectra, nbins=25):
    '''
    Get continuum from sky-subtracted spectra

    Parameters
    ----------
    spectra : 2d numpy array
        sky-subtracted spectra for each fiber
    nbins : int
        number of wavelength bins used for the continuum estimate

    Returns
    -------
    cont : 2d numpy array
        continuum normalization
    '''
    a = np.array([biweight(f, axis=1)
                  for f in np.array_split(spectra, nbins,
                                          axis=1)]).swapaxes(0, 1)
    x = np.array([np.mean(xi)
                  for xi in np.array_split(np.arange(spectra.shape[1]),
                                           nbins)])
    cont = np.zeros(spectra.shape)
    X = np.arange(spectra.shape[1])
    for i, ai in enumerate(a):
        sel = np.isfinite(ai)
        if np.sum(sel) > nbins / 2.:
            I = interp1d(x[sel], ai[sel], kind='quadratic',
                         fill_value=np.nan, bounds_error=False)
            cont[i] = I(X)
        else:
            cont[i] = 0.0
    return cont
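# A quick synthetic check of get_continuum (shapes and values are invented):
# a smooth sloped continuum plus unit noise should be recovered at roughly
# the per-bin noise level.
import numpy as np

_rng = np.random.default_rng(1)
_truth = 50. + 0.02 * np.arange(1032)
_spectra = _truth[np.newaxis, :] + _rng.normal(0., 1., (112, 1032))
_cont = get_continuum(_spectra, nbins=25)
# residuals are small compared to the per-pixel noise of 1.0
print(np.nanstd(_cont - _truth[np.newaxis, :]))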
def get_wavelength(spectrum, trace, use_kernel=True, limit=50):
    # Solve for each fiber's wavelength by tracking arc-line positions
    # outward from a reference fiber (fiberref, xref, and lines are
    # module-level).
    init_fiber = fiberref
    wavelength = 0. * spectrum
    loc = xref * 1.
    W = np.zeros((trace.shape[0], len(lines)))
    W[init_fiber] = loc
    for i in np.arange(init_fiber)[::-1]:
        loc = get_arclines_fiber(spectrum[i], loc, limit=limit,
                                 use_kernel=use_kernel)
        W[i] = loc
    loc = xref * 1.
    for i in np.arange(init_fiber + 1, spectrum.shape[0]):
        loc = get_arclines_fiber(spectrum[i], loc, limit=limit,
                                 use_kernel=use_kernel)
        W[i] = loc
    X = W * 0.
    xall = np.arange(trace.shape[1])
    res = W[0, :] * 0.
    # Smooth each line's measured position as a function of trace position
    for i in np.arange(W.shape[1]):
        x = 0. * W[:, i]
        for j in np.arange(W.shape[0]):
            x[j] = np.interp(W[j, i], xall, trace[j])
        X[:, i] = np.polyval(np.polyfit(x, W[:, i], 4), x)
        dummy, res[i] = biweight(X[:, i] - W[:, i], calc_std=True)
    # Fit a cubic wavelength solution for each fiber
    for j in np.arange(W.shape[0]):
        wavelength[j] = np.polyval(np.polyfit(X[j], lines, 3), xall)
    return wavelength, res, X, W
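# The heart of the solution above, isolated for clarity: once the arc-line
# pixel positions are smoothed across fibers, each fiber's wavelength is a
# cubic polynomial through (position, known line wavelength). The line list
# below is invented for illustration; the real list is the module-level
# `lines` array.
import numpy as np

_line_pix = np.array([120.3, 410.8, 700.2, 905.5])       # smoothed positions
_line_wave = np.array([3650.2, 4046.6, 4358.3, 4678.1])  # line wavelengths (A)
_coeffs = np.polyfit(_line_pix, _line_wave, 3)
_wavelength = np.polyval(_coeffs, np.arange(1032))       # per-pixel solution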
def get_fiber_to_fiber(spectrum, n_chunks=100):
    # Normalize each fiber by the average spectrum, then smooth the ratio
    # with a chunked biweight and quadratic interpolation.
    average = biweight(spectrum, axis=0)
    initial_ftf = spectrum / average[np.newaxis, :]
    X = np.arange(spectrum.shape[1])
    x = np.array([np.mean(chunk) for chunk in np.array_split(X, n_chunks)])
    ftf = spectrum * 0.
    for i in np.arange(len(spectrum)):
        y = np.array([biweight(chunk)
                      for chunk in np.array_split(initial_ftf[i], n_chunks)])
        sel = np.isfinite(y)
        I = interp1d(x[sel], y[sel], kind='quadratic', bounds_error=False,
                     fill_value='extrapolate')
        ftf[i] = I(X)
    return initial_ftf, ftf
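# Synthetic sanity check for get_fiber_to_fiber (values invented): if every
# fiber is a scaled copy of a common spectrum, the smoothed fiber-to-fiber
# should be flat along wavelength and proportional to the per-fiber scale.
import numpy as np

_rng = np.random.default_rng(2)
_common = 1000. + 100. * np.sin(np.arange(1032) / 50.)
_scales = _rng.uniform(0.8, 1.2, size=112)
_flat = _scales[:, np.newaxis] * _common[np.newaxis, :]
_initial, _ftf = np.asarray(get_fiber_to_fiber(_flat, n_chunks=100))
print(np.std(_ftf[:, 100] / _scales))   # near zero, up to a common constant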
def reduce(fn, biastime_list, masterbias_list, flttime_list, trace_list,
           wave_time, wave_list, ftf_list, pca=None):
    f = fits.open(fn)
    t = Time(f[0].header['DATE-OBS'] + 'T' + f[0].header['UT'])
    mtime = t.mjd
    masterbias = masterbias_list[get_cal_index(mtime, biastime_list)]
    image, E = base_reduction(f[0].data, masterbias)
    trace, good = trace_list[get_cal_index(mtime, flttime_list)]
    spec = get_spectra(image, trace)
    specerr = get_spectra_error(E, trace)
    # masterflt is module-level; chi2 measures fiber-profile fit quality
    chi2 = get_spectra_chi2(masterflt - masterbias, image, E, trace)
    badpix = chi2 > 20.
    specerr[badpix] = np.nan
    spec[badpix] = np.nan
    wavelength = wave_list[get_cal_index(mtime, wave_time)]
    specrect, errrect = rectify(spec, specerr, wavelength, def_wave)
    ftf = ftf_list[get_cal_index(mtime, flttime_list)]
    specrect[:] /= (ftf * f[0].header['EXPTIME'])
    errrect[:] /= (ftf * f[0].header['EXPTIME'])
    skymask, cont = get_skymask(biweight(specrect, axis=0), size=25)
    skysubrect = subtract_sky(specrect, good)
    if pca is None:
        pca = get_arc_pca(skysubrect, good, skymask, components=pca_comp)
        return pca
    # Grow the sky-line mask by one pixel in each direction
    skymask[1:] += skymask[:-1]
    skymask[:-1] += skymask[1:]
    cont1 = get_continuum(skysubrect, skymask, nbins=50)
    Z = skysubrect - cont1
    res = get_residual_map(Z, pca)
    res[:, ~skymask] = 0.0
    write_fits(skysubrect - res, skysubrect, specrect, errrect, f[0].header)
    return biweight(specrect, axis=0), cont
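# The pca keyword makes reduce() two-pass. A hedged sketch of the intended
# calling pattern (filenames and list variables are placeholders): the first
# call builds and returns the PCA basis of the sky residuals; subsequent
# calls apply it.
pca = reduce('arc_or_blank_exposure.fits', biastime_list, masterbias_list,
             flttime_list, trace_list, wave_time, wave_list, ftf_list,
             pca=None)
for fn in science_filenames:
    avgspec, cont = reduce(fn, biastime_list, masterbias_list, flttime_list,
                           trace_list, wave_time, wave_list, ftf_list,
                           pca=pca)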
def get_continuum(skysub, masksky, nbins=50):
    bigcont = skysub * 0.
    for j in np.arange(skysub.shape[0]):
        y = skysub[j] * 1.
        y[masksky] = np.nan
        x = np.array([np.mean(chunk)
                      for chunk in np.array_split(np.arange(len(y)), nbins)])
        z = np.array([biweight(chunk) for chunk in np.array_split(y, nbins)])
        sel = np.isfinite(z)
        if sel.sum() > 5:
            I = interp1d(x[sel], z[sel], kind='quadratic',
                         bounds_error=False, fill_value='extrapolate')
            bigcont[j] = I(np.arange(len(y)))
    return bigcont
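# Hypothetical call of this masked-continuum variant (inputs invented):
# masksky is a boolean wavelength mask that is True on sky-line pixels,
# which are NaN'd before the chunked biweight.
import numpy as np

_skysub = np.random.default_rng(3).normal(0., 1., (112, 1032))
_masksky = np.zeros(1032, dtype=bool)
_masksky[500:510] = True          # a fake sky line
_cont = get_continuum(_skysub, _masksky, nbins=50)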
def get_wavelength(spectrum, trace, good, use_kernel=True, limit=100):
    # Same scheme as the arc solution above, but continuum-subtract each
    # fiber first and skip (then later fill in) fibers flagged as bad.
    init_fiber = fiberref
    wavelength = 0. * spectrum
    loc = xref * 1.
    W = np.zeros((trace.shape[0], len(lines)))
    W[init_fiber] = loc
    for i in np.arange(init_fiber)[::-1]:
        mask, cont = identify_sky_pixels(spectrum[i])
        y = spectrum[i] - cont
        if good[i]:
            loc = get_arclines_fiber(y, loc, limit=limit,
                                     use_kernel=use_kernel)
        W[i] = loc
    loc = xref * 1.
    for i in np.arange(init_fiber + 1, spectrum.shape[0]):
        mask, cont = identify_sky_pixels(spectrum[i])
        y = spectrum[i] - cont
        if good[i]:
            loc = get_arclines_fiber(y, loc, limit=limit,
                                     use_kernel=use_kernel)
        W[i] = loc
    X = W * 0.
    xall = np.arange(trace.shape[1])
    res = W[0, :] * 0.
    for i in np.arange(W.shape[1]):
        x = 0. * W[:, i]
        # Replace bad-fiber measurements with the nearest good fiber's value
        bad = np.where(~good)[0]
        gind = np.where(good)[0]
        for b in bad:
            W[b, i] = W[gind[np.argmin(np.abs(b - gind))], i]
        for j in np.arange(W.shape[0]):
            x[j] = np.interp(W[j, i], xall, trace[j])
        sel = W[:, i] > 0.
        X[:, i] = np.polyval(np.polyfit(x[sel], W[sel, i], 4), x)
        dummy, res[i] = biweight(X[:, i] - W[:, i], calc_std=True)
    for j in np.arange(W.shape[0]):
        wavelength[j] = np.polyval(np.polyfit(X[j], lines, 3), xall)
    return wavelength, res, X, W
                                          args.folder, specid, ifuid, contid)
        if _info is None:
            args.log.error("Can't complete reduction for %s %s because of"
                           " lack of %s files." % (ifuslot_key, amp, kind))
            break
        specid, ifuSlot, ifuid = ['%03d' % int(z)
                                  for z in [_info[3], ifuslot, _info[4]]]
        if kind == 'sci':
            mastersci = _info[0] * 1.
            spec = get_spectra(_info[0], trace)
        if kind == 'twi':
            mastertwi = _info[0] * 1.
        if kind == 'zro':
            masterbias = _info[0] * 1.
            readnoise = biweight(_info[1])
            args.log.info('Readnoise for %s %s: %0.2f' %
                          (ifuslot_key, amp, readnoise))
        if kind == 'drk':
            masterdark = _info[0] * 1.
            args.log.info('Getting pixel mask %03d %s' % (int(ifuslot), amp))
            pixelmask = get_pixelmask(masterdark)
        if kind == 'flt':
            ifupos = get_ifucenfile(dirname, ifuid, amp)
            masterflt = _info[0] * 1.
            args.log.info('Getting trace for %s %s' % (ifuslot_key, amp))
            try:
                trace, ref = get_trace(_info[0], specid, ifuSlot, ifuid,
                                       amp, _info[2][:8], dirname)
                          (int(ifuslot), amp))
        if np.all(trace == 0.):
            continue
        W = []
        for group in groups:
            _info_small = build_master_frame(group, ifuslot, amp, kind,
                                             args.log, args.folder, specid,
                                             ifuid, contid)
            if _info_small is None:
                continue
            shift, error, diffphase = register_translation(_info[0],
                                                           _info_small[0],
                                                           100)
            W.append([shift, _info_small[-2], _info_small[-1]])
        try:
            BL, RMS = biweight([w[0] for w in W], axis=0, calc_std=True)
            timE = Time([w[1] for w in W]).mjd
            temp = np.array([w[2] for w in W])
            A = np.array([w[0] for w in W])
            T = Table([timE, temp, A[:, 0], A[:, 1]],
                      names=['mjd', 'temp', 'trace', 'wave'])
            T.write('%s_%s.dat' % (ifuslot_key.replace('S/N ', ''), amp),
                    format='ascii.fixed_width_two_line')
            args.log.info('Trace and wavelength RMS for %s %s is: '
                          '%0.2f %0.2f' %
                          (ifuslot_key.replace('S/N ', ''), amp, RMS[0],
                           RMS[1]))
        except Exception:
            args.log.warning('Trace and wavelength RMS for %s %s failed' %
    def make_collapsed_image(self, xc, yc, xloc, yloc, data, error, mask,
                             scale=0.25, seeing_fac=1.8, boxsize=4.,
                             wrange=[3470, 5540], nchunks=11,
                             convolve_image=False, interp_kind='linear'):
        '''
        Collapse spectra to make a single image on a rectified grid.  This
        may be done for a wavelength range and using a number of chunks of
        wavelength to take ADR into account.

        Parameters
        ----------
        xc : float
            The ifu x-coordinate for the center of the collapsed frame
        yc : float
            The ifu y-coordinate for the center of the collapsed frame
        xloc : numpy array
            The ifu x-coordinate for each fiber
        yloc : numpy array
            The ifu y-coordinate for each fiber
        data : numpy 2d array
            The calibrated spectrum for each fiber
        error : numpy 2d array
            The calibrated error spectrum for each fiber
        mask : numpy 2d array
            The good fiber wavelengths to be used in the collapsed frame
        scale : float
            Pixel scale for the output collapsed image
        seeing_fac : float
            seeing_fac = 2.35 * radius of the Gaussian kernel used if
            convolving the images to smooth out features. Unit: arcseconds
        boxsize : float
            Length of the side in arcseconds for the convolved image
        wrange : list
            The wavelength range to use for collapsing the frame
        nchunks : int
            Number of chunks used to take ADR into account when collapsing
            the fibers.  Use a larger number for a larger wavelength range.
            A small wavelength range may only need one chunk.
        convolve_image : bool
            If true, the collapsed frame is smoothed at the seeing_fac scale
        interp_kind : str
            Kind of interpolation to the pixelated grid from fiber intensity

        Returns
        -------
        zarray : numpy 3d array
            An array with length 3 for the first axis: PSF image, xgrid,
            ygrid
        '''
        a, b = data.shape
        N = int(boxsize / scale) + 1
        xl, xh = (xc - boxsize / 2., xc + boxsize / 2.)
        yl, yh = (yc - boxsize / 2., yc + boxsize / 2.)
        x, y = (np.linspace(xl, xh, N), np.linspace(yl, yh, N))
        xgrid, ygrid = np.meshgrid(x, y)
        S = np.zeros((a, 2))
        area = np.pi * 0.75**2
        sel = (self.wave > wrange[0]) * (self.wave <= wrange[1])
        I = np.arange(b)
        ichunk = [np.mean(xi) for xi in np.array_split(I[sel], nchunks)]
        ichunk = np.array(ichunk, dtype=int)
        wchunk = [np.mean(xi)
                  for xi in np.array_split(self.wave[sel], nchunks)]
        wchunk = np.array(wchunk)
        cnt = 0
        image_list = []
        if convolve_image:
            seeing = seeing_fac / scale
            G = Gaussian2DKernel(seeing / 2.35)
        if interp_kind not in ['linear', 'cubic']:
            self.log.warning('interp_kind must be "linear" or "cubic"')
            self.log.warning('Using "linear" for interp_kind')
            interp_kind = 'linear'
        for chunk, echunk, mchunk in zip(
                np.array_split(data[:, sel], nchunks, axis=1),
                np.array_split(error[:, sel], nchunks, axis=1),
                np.array_split(mask[:, sel], nchunks, axis=1)):
            marray = chunk * 1.
            marray[mchunk < 1e-8] = np.nan
            #marray = np.ma.array(chunk, mask=(mchunk<1e-8))
            #image = np.ma.median(marray, axis=1)
            #image = image / np.ma.sum(image)
            image = biweight(marray, axis=1)
            image = image / np.nansum(image)
            # Shift fiber positions by the ADR for this wavelength chunk
            S[:, 0] = xloc - self.ADRra[ichunk[cnt]]
            S[:, 1] = yloc - self.ADRdec[ichunk[cnt]]
            cnt += 1
            # biweight returns a plain ndarray, so select finite fibers here;
            # the leftover image.mask reference from the commented-out
            # masked-array version raised an AttributeError
            fsel = np.isfinite(image)
            try:
                grid_z = (griddata(S[fsel], image[fsel], (xgrid, ygrid),
                                   method=interp_kind) * scale**2 / area)
            except Exception:
                grid_z = 0.0 * xgrid
            if convolve_image:
                grid_z = convolve(grid_z, G)
            image_list.append(grid_z)
        image = np.array(image_list)
        image[np.isnan(image)] = 0.0
        zarray = np.array([image, xgrid - xc, ygrid - yc])
        return zarray
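# Hypothetical usage from an Extract-like instance `E` that carries .wave,
# .ADRra, .ADRdec, and .log; the fiber positions (xloc, yloc) and calibrated
# arrays (data, error, mask) are placeholders, not values from the source.
zarray = E.make_collapsed_image(0., 0., xloc, yloc, data, error, mask,
                                scale=0.25, boxsize=4., wrange=[3470, 5540],
                                nchunks=11, convolve_image=True)
chunk_images, dx_grid, dy_grid = zarray   # per-chunk PSF images and offsets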
                   xg, np.arange(len(xg)), left=0., right=len(xg))
    yc = np.interp(Pos[:, 1], yg, np.arange(len(yg)), left=0.,
                   right=len(yg))
    xc = np.array(np.round(xc), dtype=int)
    yc = np.array(np.round(yc), dtype=int)
    gsel = (xc > 0) * (xc < len(xg)) * (yc > 0) * (yc < len(yg))
    d = np.sqrt(xgrid**2 + ygrid**2)
    skyvalues = ((binimage[yc[gsel], xc[gsel]] < 0.01) *
                 (d[yc[gsel], xc[gsel]] > 420.))
    backspectra = biweight(spectra[gsel][skyvalues], axis=0)
    args.log.info('Average spectrum residual value: %0.3f' %
                  np.nanmedian(backspectra))
    spectra[:] -= backspectra[np.newaxis, :]
    collapse_image = (np.nansum(spectra[:, wsel] *
                                response[np.newaxis, wsel], axis=1) /
                      np.nansum(mask * response[np.newaxis, wsel], axis=1))
    collapse_eimage = np.sqrt(
        np.nansum(error[:, wsel]**2 * response[np.newaxis, wsel], axis=1) /
        np.nansum(mask * response[np.newaxis, wsel], axis=1))
    cn = np.ones((1, 2))
    cn[0, 0] = 0
    cn[0, 1] = len(collapse_image)
    image, errorimage, weight = make_image_interp(Pos, collapse_image,
                                                  collapse_eimage, xg, yg,
import sys

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import MultipleLocator
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.gaussian_process.kernels import ConstantKernel
from tables import open_file
# biweight is this package's robust location/scatter utility

filename = sys.argv[1]
t = open_file(filename, mode='r')
L = len(t.root.Cals)
B = np.zeros((L, 2))
for i in np.arange(L):
    print('Working on %i' % (i + 1))
    mdark = t.root.Cals.cols.masterdark[i]
    # Global structure: robust level of the dark frame; local structure:
    # typical column-to-column jump
    bl = biweight(mdark)
    bm = np.median(np.abs(np.diff(mdark, axis=1)))
    B[i, 0] = bl
    B[i, 1] = bm

# Plot style
sns.set_context('talk')
sns.set_style('whitegrid')
plt.figure(figsize=(7, 6))
jp = plt.scatter(B[:, 0], B[:, 1], color="firebrick", alpha=0.8, s=8)
plt.xlabel("Average Global Structure (e-)")
plt.ylabel("Average Local Structure (e-)")
plt.axis([-0.5, 3.5, -0.5, 3.5])
ax = plt.gca()
MLx = MultipleLocator(0.5)
mLx = MultipleLocator(0.1)
MLy = MultipleLocator(0.3)
def detect_sources(dx, dy, spec, err, mask, def_wave, psf, ran, scale, log,
                   spec_res=5.6, thresh=5.):
    '''
    Detection algorithm

    Parameters
    ----------
    dx : 1d numpy array
        delta_x or delta_ra in " for each fiber compared to a given 0, 0
    dy : 1d numpy array
        delta_y or delta_dec in " for each fiber compared to a given 0, 0
    spec : 2d numpy array
        spectrum (sky-subtracted) for each fiber
    err : 2d numpy array
        error spectrum for each fiber
    mask : 2d numpy array
        masked spectrum values for bad fiber to fiber, bad pixels, or bad
        chi2
    def_wave : 1d numpy array
        wavelength grid of the rectified spectra
    psf : list of 2d numpy arrays
        PSF amplitude (psf[0]) sampled on x and y grids (psf[1], psf[2])
    ran : list
        [xmin, xmax, ymin, ymax] bounds of the output grid in "
    scale : float
        spaxel scale of the output grid in "
    log : logging object
        logger for progress messages
    spec_res : float
        spectral resolution in pixels (2A) and refers to radius not fwhm
    thresh : float
        S/N threshold for flagging spaxels as detections

    Returns
    -------
    cube, errcube : 3d numpy arrays
        matched-filter smoothed data and error cubes on the output grid
    origcube, origerrcube : 3d numpy arrays
        unsmoothed data and error cubes
    L : 2d numpy array
        one row per detection: x, y, wavelength, fwhm, chi2, amplitude, S/N
    K : 3d numpy array
        extracted spectrum, error spectrum, and Gaussian line model for
        each detection
    '''
    N1 = int((ran[1] - ran[0]) / scale) + 1
    N2 = int((ran[3] - ran[2]) / scale) + 1
    gridx, gridy = np.meshgrid(np.linspace(ran[0], ran[1], N1),
                               np.linspace(ran[2], ran[3], N2))
    T = np.array([psf[1].ravel(), psf[2].ravel()]).swapaxes(0, 1)
    I = LinearNDInterpolator(T, psf[0].ravel(), fill_value=0.0)
    cube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    errcube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    origcube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    origerrcube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    # Note: the mask argument is recomputed here from non-finite spectra
    mask = ~np.isfinite(spec)
    G = Gaussian1DKernel(spec_res / 2.35 / (def_wave[1] - def_wave[0]))
    cont = get_continuum(spec, nbins=25)
    S = spec - cont
    for i in np.arange(gridx.shape[0]):
        for j in np.arange(gridx.shape[1]):
            xg = gridx[i, j]
            yg = gridy[i, j]
            sel = np.where(np.sqrt((dx - xg)**2 + (dy - yg)**2) <= 4.0)[0]
            weights = I(dx[sel] - xg, dy[sel] - yg)
            norm = weights.sum()
            weights /= norm
            imask = ~(mask[sel])
            X = S[sel] * 1.
            X[mask[sel]] = 0.0
            Y = err[sel] * 1.
            Y[mask[sel]] = 0.0
            origcube[i, j, :] = np.sum(weights[:, np.newaxis] * X * imask /
                                       Y**2, axis=0)
            origerrcube[i, j, :] = np.sqrt(
                np.sum(weights[:, np.newaxis]**2 * imask / Y**2, axis=0))
            w = np.sum(weights[:, np.newaxis] * imask, axis=0) * norm
            cube[i, j, w < 0.7] = np.nan
            errcube[i, j, w < 0.7] = np.nan
            WS = manual_convolution(origcube[i, j], G)
            WE = manual_convolution(origerrcube[i, j], G, error=True)
            #WS = origcube[i, j]
            #WE = origerrcube[i, j]
            cube[i, j, :] = WS
            errcube[i, j, :] = WE
    Y = cube / errcube
    bl, bm = biweight(Y.ravel(), calc_std=True)
    log.info('Error Correction: %0.2f' % bm)
    #Y[:] /= bm
    test = Y > thresh
    L = np.zeros((0, 7))
    K = np.zeros((0, len(def_wave), 3))
    log.info('Number of >thresh spaxels found: %i' % test.sum())
    if (test.sum() > 0) * (test.sum() < 2000):
        ids = np.where(test)
        Z = Y[ids[0], ids[1], ids[2]]
        sid = np.argsort(Z)[::-1]
        ids_sorted = (ids[0][sid], ids[1][sid], ids[2][sid])
        z = np.array([gridx[ids_sorted[0], ids_sorted[1]] * 3.,
                      gridy[ids_sorted[0], ids_sorted[1]] * 3.,
                      def_wave[ids_sorted[2]]]).swapaxes(0, 1)
        SN = Y[ids_sorted[0], ids_sorted[1], ids_sorted[2]]
        if z.shape[0] == 1:
            z = np.vstack([z, z])
            SN = np.hstack([SN, SN])
        clustering = AgglomerativeClustering(n_clusters=None,
                                             compute_full_tree=True,
                                             distance_threshold=50,
                                             linkage='complete').fit(z)
        z = np.array([gridx[ids_sorted[0], ids_sorted[1]],
                      gridy[ids_sorted[0], ids_sorted[1]],
                      def_wave[ids_sorted[2]]]).swapaxes(0, 1)
        if z.shape[0] == 1:
            z = np.vstack([z, z])
        US = np.unique(clustering.labels_)
        log.info('Number of sources found: %i' % len(US))
        L = np.zeros((len(US), 7))
        K = np.zeros((len(US), len(def_wave), 3))
        fitter = LevMarLSQFitter()
        for i, ui in enumerate(US):
            sel = clustering.labels_ == ui
            L[i, 0] = np.mean(z[sel, 0])
            L[i, 1] = np.mean(z[sel, 1])
            L[i, 2] = np.mean(z[sel, 2])
            L[i, 6] = np.max(SN[sel])
            dsel = np.sqrt((gridx - L[i, 0])**2 +
                           (gridy - L[i, 1])**2) < 2.5
            wi = int(np.interp(L[i, 2], def_wave,
                               np.arange(len(def_wave))))
            x = gridx[dsel]
            y = gridy[dsel]
            v = cube[:, :, wi][dsel]
            fsel = np.isfinite(v)
            xc = np.sum(x[fsel] * v[fsel]) / np.sum(v[fsel])
            yc = np.sum(y[fsel] * v[fsel]) / np.sum(v[fsel])
            sel = np.where(np.sqrt((dx - xc)**2 + (dy - yc)**2) <= 4.0)[0]
            weights = I(dx[sel] - xc, dy[sel] - yc)
            imask = ~(mask[sel])
            X = S[sel] * 1.
            X[mask[sel]] = 0.0
            Y = err[sel] * 1.
            Y[mask[sel]] = 0.0
            spatial_spec = (np.sum(weights[:, np.newaxis] * X * imask /
                                   Y**2, axis=0) /
                            np.sum(weights[:, np.newaxis]**2 * imask /
                                   Y**2, axis=0))
            spatial_spec_err = np.sqrt(
                np.sum(weights[:, np.newaxis] * imask, axis=0) /
                np.sum(weights[:, np.newaxis]**2 * imask / Y**2, axis=0))
            X = spec[sel] * 1.
            X[mask[sel]] = 0.0
            Y = err[sel] * 1.
            Y[mask[sel]] = 0.0
            spatial_spec_or = (np.sum(weights[:, np.newaxis] * X * imask /
                                      Y**2, axis=0) /
                               np.sum(weights[:, np.newaxis]**2 * imask /
                                      Y**2, axis=0))
            spatial_spec_err_or = np.sqrt(
                np.sum(weights[:, np.newaxis] * imask, axis=0) /
                np.sum(weights[:, np.newaxis]**2 * imask / Y**2, axis=0))
            wsel = np.where(np.abs(def_wave - L[i, 2]) <= 8.)[0]
            if (~np.isfinite(spatial_spec[wsel])).sum() > 0.:
                L[i, :] = 0.0
                continue
            G = Gaussian1D(mean=L[i, 2], stddev=spec_res / 2.35)
            G.stddev.bounds = (4. / 2.35, 8. / 2.35)
            G.mean.bounds = (L[i, 2] - 4., L[i, 2] + 4.)
            fit = fitter(G, def_wave[wsel], spatial_spec[wsel])
            wc = fit.mean.value
            csel = np.where(np.abs(def_wave - L[i, 2]) <= 10.)[0]
            chi2 = (1. / (len(csel) - 3.)
                    * np.sum((fit(def_wave[csel]) -
                              spatial_spec[csel])**2 /
                             spatial_spec_err[csel]**2))
            L[i, 0] = xc
            L[i, 1] = yc
            L[i, 2] = wc
            L[i, 3] = fit.stddev.value * 2.35
            L[i, 4] = chi2
            L[i, 5] = fit.amplitude.value * 2.
            K[i, :, 0] = spatial_spec_or
            K[i, :, 1] = spatial_spec_err_or
            K[i, :, 2] = fit(def_wave)
    return cube, errcube, origcube, origerrcube, L, K
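# The per-source extraction above is an inverse-variance, PSF-weighted
# estimator. Isolating it (names invented for illustration): for fiber
# weights w_i, fluxes X_i, and errors s_i,
#     F       = sum(w_i * X_i / s_i**2) / sum(w_i**2 / s_i**2)
#     sigma_F = sqrt(sum(w_i) / sum(w_i**2 / s_i**2))
# which matches spatial_spec and spatial_spec_err in the loop above.
import numpy as np

def _matched_extraction(weights, flux, err):
    ivar = 1.0 / err**2
    F = np.sum(weights * flux * ivar) / np.sum(weights**2 * ivar)
    sigma_F = np.sqrt(np.sum(weights) / np.sum(weights**2 * ivar))
    return F, sigma_F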
E = Extract()
Aother = Astrometry(bounding_box[0], bounding_box[1], pa, 0., 0.)
E.get_ADR_RAdec(Aother)
dra = np.interp(wave_extract[0], def_wave, E.ADRra)
ddec = np.interp(wave_extract[0], def_wave, E.ADRdec)
print(dra, ddec)
ra -= dra / 3600. / np.cos(np.deg2rad(A.dec0))
dec -= ddec / 3600.
header = tp.to_header()
x, y = tp.wcs_world2pix(ra, dec, 1)
xg = np.linspace(-bb, bb, N)
yg = np.linspace(-bb, bb, N)
xgrid, ygrid = np.meshgrid(xg, yg)
Pos = np.zeros((len(x), 2))
Pos[:, 0], Pos[:, 1] = (x, y)
back = biweight(spectra[:, bsel], axis=1)
data = (spectra[:, wsel] - back[:, np.newaxis]) * 2.
mask = np.array(np.isfinite(data), dtype=float)
weight = Gmodel[np.newaxis, wsel]
edata = error[:, wsel] * 2.
z = (np.nansum(mask * data * weight, axis=1) /
     np.nansum(mask * weight**2, axis=1))
ze = np.sqrt(np.nansum(mask * edata**2 * weight, axis=1) /
             np.nansum(mask * weight**2, axis=1))
good = np.isfinite(z)
image = make_image(Pos[good], z[good], xg, yg, xgrid, ygrid, 1.5 / 2.35)
errorimage = make_image(Pos[good], ze[good], xg, yg, xgrid, ygrid,
                        1.5 / 2.35)
good = np.isfinite(back)
backimage = make_image(Pos[good], back[good], xg, yg, xgrid, ygrid,
                       1.5 / 2.35)
name = op.basename(args.h5file[:-3]) + ('_%s.fits' % args.surname)
header['CRPIX1'] = (N + 1) / 2
def base_reduction(filename, tinfo, get_header=False):
    '''
    Reduce filename from tarfile or fits file.

    Reduction steps include:
        1) Overscan subtraction
        2) Trim image
        3) Orientation
        4) Gain Multiplication
        5) Error propagation

    Parameters
    ----------
    filename : str
        Filename of the fits file
    tinfo : tuple
        Tarfile object, member list, and member names, used when the fits
        file lives inside a tar archive
    get_header : boolean
        Flag to get and return the header

    Returns
    -------
    a : 2d numpy array
        Reduced fits image, see steps above
    e : 2d numpy array
        Associated error frame
    '''
    # Load fits file
    tarbase = op.dirname(op.dirname(op.dirname(filename))) + '.tar'
    if op.exists(tarbase):
        T = tinfo[0]
        s = '/'.join(filename.split('/')[-4:])
        ind = np.where(s == np.array(tinfo[2]))[0][0]
        a = fits.open(T.extractfile(tinfo[1][ind]))
    else:
        a = fits.open(filename)
    image = np.array(a[0].data, dtype=float)
    # Overscan subtraction
    overscan_length = int(32 * (image.shape[1] / 1064))
    O = biweight(image[:, -(overscan_length - 2):])
    image[:] = image - O
    # Trim image
    image = image[:, :-overscan_length]
    # Gain multiplication (catch negative cases)
    gain = a[0].header['GAIN']
    gain = np.where(gain > 0., gain, 0.85)
    rdnoise = a[0].header['RDNOISE']
    rdnoise = np.where(rdnoise > 0., rdnoise, 3.)
    amp = (a[0].header['CCDPOS'].replace(' ', '') +
           a[0].header['CCDHALF'].replace(' ', ''))
    try:
        ampname = a[0].header['AMPNAME']
    except KeyError:
        ampname = None
    header = a[0].header
    # Orient image
    a = orient_image(image, amp, ampname) * gain
    # Calculate error frame
    E = np.sqrt(rdnoise**2 + np.where(a > 0., a, 0.))
    if get_header:
        return a, E, header
    return a, E
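# The error model used above, isolated for clarity: for a gain-corrected
# pixel value a (electrons) and read noise rdnoise (electrons), the 1-sigma
# error is the quadrature sum of read noise and Poisson noise from positive
# counts. A standalone restatement, not a new API:
import numpy as np

def _error_frame(a, rdnoise=3.0):
    return np.sqrt(rdnoise**2 + np.where(a > 0., a, 0.))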
        num = np.nansum(filtg[np.newaxis, :] * mask * (spectra / error),
                        axis=1)
        denom = np.nansum(filtg[np.newaxis, :] * mask, axis=1)
        sn = num / denom
        goodspec = (mask.sum(axis=1) > 0.8) * (sn > 1.) * clean
        N = goodspec.sum()
        if N < 1:
            continue
        if niter == 0:
            totN += N
            Nshots += 1
        virus_gmags = get_gmags(spectra[goodspec], weight[goodspec])
        normalization = 10**(-0.4 * (gmag[goodspec] - virus_gmags))
        norm_spectra = spectra[goodspec] * normalization[:, np.newaxis]
        osel = sn[goodspec] > 5.
        average_norm, std = biweight(normalization[osel], calc_std=True)
        if niter == 0:
            log.info('%s has %i/%i and average normalization correction: '
                     '%0.2f +/- %0.2f' %
                     (name, osel.sum(), N, average_norm, std))
        if niter == 1:
            inds = np.random.randint(N, size=20)
            es = np.zeros((20, 1036))
            ss = np.zeros((20, 1036))
            for j, ind in enumerate(inds):
                es[j] = h5file.root.Fibers.cols.error[ind]
                ss[j] = h5file.root.Fibers.cols.skyspectrum[ind]
            EA[III] = np.nanmedian(es, axis=0)
            SS[III] = np.nanmedian(ss, axis=0)
        item_list = get_itemlist(op.basename(h5name))
        vcor, mjd = get_rv_cor(item_list)
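# The normalization above is a magnitude offset converted to a flux ratio:
# 10**(-0.4 * (m_catalog - m_virus)) scales each spectrum so its synthetic
# g-band magnitude matches the catalog value. A quick numerical illustration
# (values invented):
import numpy as np

_dm = np.array([-0.1, 0.0, 0.25])       # catalog minus measured g-mag
print(10**(-0.4 * _dm))                 # ~[1.096, 1.000, 0.794]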