mpl6agn['inferred tq plus error'][n].mask = mask
mpl6agn['inferred tq minus error'][n].mask = mask
mpl6agn['inferred tau'][n].mask = mask
mpl6agn['inferred tau plus error'][n].mask = mask
mpl6agn['inferred tau minus error'][n].mask = mask

# Define a radial binning using the galaxy's position angle, ellipticity, and
# effective radius, then assign each spaxel to a bin
rbin = RadialBinning(par=RadialBinningPar(center=[0.0, 0.0], pa=obsp['pa'], ell=obsp['ell'],
                                          radius_scale=obsp['reff'], radii=[0.0, -1, nbins],
                                          log_step=False))
binid = rbin.bin_index(x, y)
r, theta = rbin.sma_coo.polar(x, y)

# Bin edges and midpoints in units of the effective radius
bins = numpy.linspace(0, rbin.par['radii'][1]/rbin.par['radius_scale'], nbins+1)
mid_bins.append(bins[:-1] + numpy.diff(bins)/2.)

# Stack the spectra in each radial bin
stack = SpectralStack()
stack_wave, stack_flux, stack_sdev, stack_npix, stack_ivar, stack_sres, stack_covar \
        = stack.stack_DRPFits(drpf, binid,
                              par=SpectralStackPar('mean', False, None, 'channels',
                                                   SpectralStack.parse_covariance_parameters(
                                                        'channels', '11'),
                                                   None))

# Measure the emission lines and spectral indices on the stacked spectra and
# keep the Halpha equivalent widths and the index measurements (with errors)
em_model_eml_par, indx_measurements = measure_spec(stack_flux, errors=stack_sdev,
                                                   ivar=stack_ivar, sres=stack_sres)
emls.append(em_model_eml_par['EW'][:, numpy.where(emlines['name'] == 'Ha')].reshape(-1, 1))
idms.append(indx_measurements['INDX'].reshape(-1, 8))
emls_error.append(em_model_eml_par['EWERR'][:, numpy.where(emlines['name'] == 'Ha')].reshape(-1, 1))
idms_error.append(indx_measurements['INDXERR'].reshape(-1, 8))

numpy.save(str(mpl6agn[n]['plate']) + '-' + str(mpl6agn[n]['ifudsgn'].strip())
           + '_mpl6agn_measured_emls.npy', emls)
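
# A minimal follow-up sketch (not part of the original script): plot the radial
# profile of the Halpha equivalent width for the galaxy processed above. It
# assumes `mid_bins[-1]` has shape (nbins,) and `emls[-1]`/`emls_error[-1]` have
# shape (nbins, 1), as constructed above; the plotting itself is illustrative.
from matplotlib import pyplot

pyplot.errorbar(mid_bins[-1], numpy.squeeze(emls[-1]), yerr=numpy.squeeze(emls_error[-1]),
                marker='o')
pyplot.xlabel(r'$R/R_{\rm eff}$')
pyplot.ylabel(r'EW(H$\alpha$) [$\mathrm{\AA}$]')
pyplot.show()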
# Calculate the S/N and coordinates
rdxqa = ReductionAssessment('SNRG', drpf, analysis_path=analysis_path)
x = rdxqa.hdu['SPECTRUM'].data['SKY_COO'][:, 0]
y = rdxqa.hdu['SPECTRUM'].data['SKY_COO'][:, 1]
fgdpix = rdxqa.hdu['SPECTRUM'].data['FGOODPIX'] > 0.8
#binid = sqbin.bin_spaxels(x[fgdpix], y[fgdpix], par=None)
#pdb.set_trace()

# Setup the stacking operations
stackpar = SpectralStackPar('mean',         # Operation for stack
                            False,          # Apply a velocity registration
                            None,           # Velocity offsets for registration
                            'channels',     # Covariance mode and parameters
                            SpectralStack.parse_covariance_parameters('channels', 11),
                            True,           # Propagate the LSF through the stacking
                            True)           # Use pre-pixelized LSF (KHRR added this)
stacker = SpectralStack()

# Create a new binning method
binning_method = SpatiallyBinnedSpectraDef('5x5n',             # Key for binning method
                                           'ODonnell',         # Galactic reddening function to use
                                           3.1,                # Rv for Galactic reddening
                                           0.0,                # Minimum S/N to include
                                           None,               # Object with binning parameters
                                           None,               # Binning class instance
                                           sqbin.bin_spaxels,  # Binning function
                                           stackpar,           # Object with stacking parameters
                                           stacker,            # Stacking class instance
def test_register():
    # Generate some line centers and redshifts
    nlines = 10
    nspec = 20
    rng = numpy.random.default_rng()
    line_flux = rng.uniform(low=10, high=100, size=nlines)
    center = rng.uniform(low=3650., high=10000, size=nlines)
    z = rng.uniform(low=0.03, high=0.1, size=nspec)
    cz = z * astropy.constants.c.to('km/s').value
    sigma = 1                       # In pixels

    # Make the wavelength vector
    wave = numpy.logspace(*numpy.log10([3600., 10300.]).tolist(), 3000)

    # Some convenience things to make sure the Gaussian profiles are sampled in
    # pixels
    dlogw = numpy.mean(numpy.diff(numpy.log10(wave)))
    coff = numpy.log10(wave[0]) / dlogw
    x = numpy.arange(wave.size)

    # Construct the spectra
    flux = numpy.zeros((nspec, wave.size), dtype=float)
    for i in range(nspec):
        _center = center * (1 + z[i])
        _center = numpy.log10(_center) / dlogw - coff
        for f, c in zip(line_flux, _center):
            flux[i] += f * pixelated_gaussian(x, c=c, s=sigma)

    # Stacking object
    stacker = SpectralStack()

    # Stack the spectra, including the offset by the input velocity
    swave, sflux, sfdev, snpix, sivar, ssres, scovar = stacker.stack(wave, flux, cz=cz, log=True)

    # Just register the spectra to make sure the registration works; this is a
    # simple check on the above
    rwave, rflux, rivar, rsres = SpectralStack.register(wave, cz, flux, log=True)
    reg_stack_flux = numpy.ma.mean(rflux, axis=0)
    assert numpy.allclose(sflux[0], reg_stack_flux), 'Stack and by-hand check failed.'

    # Construct the deredshifted stack from scratch
    rcoff = numpy.log10(rwave[0]) / dlogw
    rx = numpy.arange(rwave.size)
    model_flux = numpy.zeros(reg_stack_flux.size, dtype=float)
    rcenter = numpy.log10(center) / dlogw - rcoff
    for f, c in zip(line_flux, rcenter):
        model_flux += f * pixelated_gaussian(rx, c=c, s=sigma)

    # Compare the stacks against truth
    # pyplot.plot(swave, sflux[0])          # Stacked using stacker.stack (blue)
    # pyplot.plot(rwave, reg_stack_flux)    # Stacked by hand after registration (orange)
    # pyplot.plot(rwave, model_flux)        # Truth (green)
    # pyplot.show()

    xcor = numpy.correlate(sflux[0], model_flux)
    assert numpy.argmax(xcor) == xcor.size // 2, 'Should be no lag between the model and stack'
    assert numpy.absolute(numpy.sum(sflux[0])/numpy.sum(model_flux) - 1) < 0.01, \
            'Stack sums should differ by less than 1%'
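
# The test relies on a `pixelated_gaussian` helper imported from elsewhere in the
# test suite. Below is a minimal sketch of such a helper, assuming it returns a
# unit-area Gaussian (center `c`, dispersion `s`, both in pixel units) integrated
# over each unit-width pixel in `x`; the actual implementation in the package may
# differ.
from scipy import special

def pixelated_gaussian(x, c=0.0, s=1.0):
    # Integrate a unit-area Gaussian over [x-0.5, x+0.5] via the error function
    n = numpy.sqrt(2.) * s
    return (special.erf((x - c + 0.5)/n) - special.erf((x - c - 0.5)/n)) / 2.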
# Fitting functions expect data to be in 2D arrays (for now):
if len(flux.shape) == 1:
    flux = flux.reshape(1, -1)
    ferr = ferr.reshape(1, -1)
    sres = sres.reshape(1, -1)
    flux_binned = flux.copy()
    ferr_binned = ferr.copy()
    sres_binned = sres.copy()
    x_binned = x.copy()
    y_binned = y.copy()
    z_binned = z.copy()
    dispersion_binned = dispersion.copy()
else:
    # Stack the spectra
    wave_binned, flux_binned, fsdev_binned, npix_binned, ivar_binned, sres_binned, \
            covar_binned = SpectralStack().stack(wave, flux, binid=binid, ivar=ivar, sres=sres)
    ferr_binned = numpy.ma.power(ivar_binned, -0.5)
    x_binned = numpy.array([numpy.mean(x[binid == i]) for i in numpy.unique(binid)])
    y_binned = numpy.array([numpy.mean(y[binid == i]) for i in numpy.unique(binid)])
    z_binned = numpy.array([numpy.mean(z[binid == i]) for i in numpy.unique(binid)])
    dispersion_binned = numpy.array([numpy.mean(dispersion)])

    if usr_plots:
        for f in flux:
            pyplot.plot(wave, f)
        pyplot.plot(wave_binned, flux_binned[0])
        pyplot.show()

#-------------------------------------------------------------------
# Fit the stellar continuum
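
# The stellar-continuum fit itself is not included in this excerpt. As a quick,
# illustrative check on the binned products that would feed it (not part of the
# original script), one could look at the median S/N of each binned spectrum
# using only the variables defined above; this assumes `flux_binned` and
# `ferr_binned` are (masked) arrays with one row per binned spectrum.
snr_binned = numpy.ma.median(flux_binned / ferr_binned, axis=1)
print('Median S/N per binned spectrum:', snr_binned)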