def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType="variance", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print, units='Jy/beam', e_num=1): """Run RM synthesis on 1D data. Args: data (list): Contains frequency and polarization data as either: [freq_Hz, I, Q, U, dI, dQ, dU] freq_Hz (array_like): Frequency of each channel in Hz. I (array_like): Stokes I intensity in each channel. Q (array_like): Stokes Q intensity in each channel. U (array_like): Stokes U intensity in each channel. dI (array_like): Error in Stokes I intensity in each channel. dQ (array_like): Error in Stokes Q intensity in each channel. dU (array_like): Error in Stokes U intensity in each channel. or [freq_Hz, q, u, dq, du] freq_Hz (array_like): Frequency of each channel in Hz. q (array_like): Fractional Stokes Q intensity (Q/I) in each channel. u (array_like): Fractional Stokes U intensity (U/I) in each channel. dq (array_like): Error in fractional Stokes Q intensity in each channel. du (array_like): Error in fractional Stokes U intensity in each channel. Kwargs: polyOrd (int): Order of polynomial to fit to Stokes I spectrum. phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday depth channel size (rad/m^2). nSamples (float): Number of samples across the RMSF. weightType (str): Can be "variance" or "uniform" "variance" -- Weight by uncertainty in Q and U. "uniform" -- Weight uniformly (i.e. with 1s) fitRMSF (bool): Fit a Gaussian to the RMSF? noStokesI (bool: Is Stokes I data provided? phiNoise_radm2 (float): ???? nBits (int): Precision of floating point numbers. showPlots (bool): Show plots? debug (bool): Turn on debugging messages & plots? verbose (bool): Verbosity. log (function): Which logging function to use. units (str): Units of data. Returns: mDict (dict): Summary of RM synthesis results. aDict (dict): Data output by RM synthesis. """ # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2 * nBits) # freq_Hz, I, Q, U, dI, dQ, dU try: if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data if verbose: log("... success.") except Exception: if verbose: log("...failed.") # freq_Hz, q, u, dq, du try: if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data if verbose: log("... 
success.") noStokesI = True except Exception: if verbose: log("...failed.") if debug: log(traceback.format_exc()) sys.exit() if verbose: log("Successfully read in the Stokes spectra.") # If no Stokes I present, create a dummy spectrum = unity if noStokesI: if verbose: log("Warn: no Stokes I data in use.") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to GHz for convenience freqArr_GHz = freqArr_Hz / 1e9 dQUArr = (dQArr + dUArr) / 2.0 # Fit the Stokes I spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr, QArr = QArr, UArr = UArr, dIArr = dIArr, dQArr = dQArr, dUArr = dUArr, polyOrd = polyOrd, verbose = True, debug = debug) # Plot the data and the Stokes I model fit if showPlots: if verbose: log("Plotting the input data and spectral index fit.") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz / 1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr=IArr, qArr=qArr, uArr=uArr, dIArr=dIArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr=IModHirArr, fig=specFig, units=units) # Use the custom navigation toolbar (does not work on Mac OS X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass # Display the figure # if not plt.isinteractive(): # specFig.show() # DEBUG (plot the Q, U and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz / 1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz / 1e9, dQArr, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz / 1e9, dUArr, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9 ax.set_xlim( np.min(freqArr_Hz) / 1e9 - xRange * 0.05, np.max(freqArr_Hz) / 1e9 + xRange * 0.05) ax.set_xlabel('$\\nu$ (GHz)') ax.set_ylabel('RMS ' + units) ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2)) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, 600.0) # Force the minimum phiMax # Faraday depth sampling. Zero always centred on middle channel nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." 
% (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the weighting as 1/sigma^2 or all 1s (uniform) if weightType == "variance": weightArr = 1.0 / np.power(dQUArr, 2.0) else: weightType = "uniform" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log("Weight type is '%s'." % weightType) startTime = time.time() # Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2, mylist = do_rmsynth_planes( dataQ=qArr, dataU=uArr, lambdaSqArr_m2=lambdaSqArr_m2, phiArr_radm2=phiArr_radm2, weightArr=weightArr, nBits=nBits, verbose=verbose, log=log, e_num=e_num) # Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits = nBits, verbose = verbose, log = log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime - startTime) if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime) # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model # Multiply the dirty FDF by Ifreq0 to recover the PI freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict["p"])(freq0_Hz / 1e9) dirtyFDF *= (Ifreq0 ) # FDF is in fracpol units initially, convert back to flux # Calculate the theoretical noise in the FDF !!Old formula only works for wariance weights! weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2) # Measure the parameters of the dirty FDF # Use the theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF=dirtyFDF, phiArr=phiArr_radm2, fwhmRMSF=fwhmRMSF, dFDF=dFDFth, lamSqArr_m2=lambdaSqArr_m2, lam0Sq=lam0Sq_m2) mDict["Ifreq0"] = toscalar(Ifreq0) mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]]) mDict["IfitStat"] = fitDict["fitStatus"] mDict["IfitChiSqRed"] = fitDict["chiSqRed"] mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2) mDict["freq0_Hz"] = toscalar(freq0_Hz) mDict["fwhmRMSF"] = toscalar(fwhmRMSF) mDict["dQU"] = toscalar(nanmedian(dQUArr)) mDict["dFDFth"] = toscalar(dFDFth) mDict["units"] = units mDict['dQUArr'] = dQUArr if fitDict["fitStatus"] >= 128: log("WARNING: Stokes I model contains negative values!") elif fitDict["fitStatus"] >= 64: log("Caution: Stokes I model has low signal-to-noise.") #Add information on nature of channels: good_channels = np.where(np.logical_and(weightArr != 0, np.isfinite(qArr)))[0] mDict["min_freq"] = float(np.min(freqArr_Hz[good_channels])) mDict["max_freq"] = float(np.max(freqArr_Hz[good_channels])) mDict["N_channels"] = good_channels.size mDict["median_channel_width"] = float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of the q and u spectra mDict["fracPol"] = mDict["ampPeakPIfit"] / (Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz, qArr=qArr, uArr=uArr, dqArr=dqArr, duArr=duArr, fracPol=mDict["fracPol"], psi0_deg=mDict["polAngle0Fit_deg"], RM_radm2=mDict["phiPeakPIfit_rm2"]) mDict.update(mD) # Debugging plots for spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD["xArrQ"], qArr=pD["yArrQ"], 
dqArr=pD["dyArrQ"], sigmaAddqArr=pD["sigmaAddArrQ"], chiSqRedqArr=pD["chiSqRedArrQ"], probqArr=pD["probArrQ"], uArr=pD["yArrU"], duArr=pD["dyArrU"], sigmaAdduArr=pD["sigmaAddArrU"], chiSqReduArr=pD["chiSqRedArrU"], probuArr=pD["probArrU"], mDict=mDict) tmpFig.show() #add array dictionary aDict = dict() aDict["phiArr_radm2"] = phiArr_radm2 aDict["phi2Arr_radm2"] = phi2Arr_radm2 aDict["RMSFArr"] = RMSFArr aDict["freqArr_Hz"] = freqArr_Hz aDict["weightArr"] = weightArr aDict["dirtyFDF"] = dirtyFDF if verbose: # Print the results to the screen log() log('-' * 80) log('RESULTS:\n') log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"])) log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"])) log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"])) log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"])) log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9)) log('I freq0 = %.4g %s' % (mDict["Ifreq0"], units)) log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"], mDict["dAmpPeakPIfit"], units)) log('QU Noise = %.4g %s' % (mDict["dQU"], units)) log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"], units)) log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"], units)) log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"], units)) log('FDF SNR = %.4g ' % (mDict["snrPIfit"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"], mDict["dSigmaAddMinusQ"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"], mDict["dSigmaAddMinusU"])) log() log('-' * 80) myfig = plotmylist(mylist) plt.show() myfig.show() # Plot the RM Spread Function and dirty FDF if showPlots: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr=phiArr_radm2, FDF=dirtyFDF, phi2Arr=phi2Arr_radm2, RMSFArr=RMSFArr, fwhmRMSF=fwhmRMSF, vLine=mDict["phiPeakPIfit_rm2"], fig=fdfFig, units=units) # Use the custom navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: # pass # Display the figure # fdfFig.show() # Pause if plotting enabled if showPlots or debug: plt.show() # #if verbose: print "Press <RETURN> to exit ...", # input() return mDict, aDict, mylist
def make_model_I(fitsI, freqFile, polyOrd=3, cutoff=-1, prefixOut="",
                 outDir="", debug=True, verbose=True, buffCols=10):
    """
    Detect emission in a cube and fit a polynomial model spectrum to the
    emitting pixels. Create a representative noise spectrum using the
    residual planes.
    """

    # Default data type
    # Sanity check on header dimensions
    print("Reading FITS cube header from '%s':" % fitsI)
    headI = pf.getheader(fitsI, 0)
    nDim = headI["NAXIS"]
    if nDim < 3 or nDim > 4:
        print("Err: only 3 or 4 dimensions supported: D = %d." %
              headI["NAXIS"])
        sys.exit()
    nDim = headI["NAXIS"]

    # Identify the frequency axis:
    freq_axis = 0  # Default for 'frequency axis not identified'
    # Check for frequency axes. Because I don't know what different formatting
    # I might get ('FREQ' vs 'OBSFREQ' vs 'Freq' vs 'Frequency'), convert to
    # all caps and check for 'FREQ' anywhere in the axis name. Shouldn't be
    # more than one axis like that, right?
    for i in range(1, nDim + 1):
        try:
            if 'FREQ' in headI['CTYPE' + str(i)].upper():
                freq_axis = i
        except Exception:
            # The try statement is needed in case the FITS header does not
            # have CTYPE keywords.
            pass

    nBits = np.abs(headI['BITPIX'])
    dtFloat = "float" + str(nBits)
    nChan = headI["NAXIS" + str(freq_axis)]

    # Read the frequency vector
    print("Reading frequency vector from '%s'." % freqFile)
    freqArr_Hz = np.loadtxt(freqFile, dtype=dtFloat)
    freqArr_GHz = freqArr_Hz / 1e9
    if nChan != len(freqArr_Hz):
        print("Err: frequency vector and frequency axis of cube have "
              "unequal length.")
        sys.exit()

    # Measure the RMS spectrum using 2 passes of MAD on each plane
    # Determine which pixels have emission above the cutoff
    print("Measuring the RMS noise and creating an emission mask")
    rmsArr = np.zeros_like(freqArr_Hz)
    mskSrc = np.zeros((headI["NAXIS2"], headI["NAXIS1"]), dtype=dtFloat)
    mskSky = np.zeros((headI["NAXIS2"], headI["NAXIS1"]), dtype=dtFloat)
    for i in range(nChan):
        HDULst = pf.open(fitsI, "readonly", memmap=True)
        if nDim == 3:
            dataPlane = HDULst[0].data[i, :, :]
        elif nDim == 4 and freq_axis == 4:
            dataPlane = HDULst[0].data[i, 0, :, :]
        elif nDim == 4 and freq_axis == 3:
            dataPlane = HDULst[0].data[0, i, :, :]
        if cutoff > 0:
            idxSky = np.where(dataPlane < cutoff)
        else:
            idxSky = np.where(dataPlane)

        # Pass 1
        rmsTmp = MAD(dataPlane[idxSky])
        medTmp = np.nanmedian(dataPlane[idxSky])

        # Pass 2: use a fixed 3-sigma cutoff to mask off emission
        idxSky = np.where(dataPlane < medTmp + rmsTmp * 3)
        medSky = np.nanmedian(dataPlane[idxSky])
        rmsArr[i] = MAD(dataPlane[idxSky])
        mskSky[idxSky] += 1

        # When building the final emission mask treat +ve cutoffs as absolute
        # values and negative cutoffs as sigma values
        if cutoff > 0:
            idxSrc = np.where(dataPlane > cutoff)
        else:
            idxSrc = np.where(dataPlane > medSky - 1 * rmsArr[i] * cutoff)
        mskSrc[idxSrc] += 1

        # Clean up
        HDULst.close()
        del HDULst

    # Save the noise spectrum
    if outDir == '':
        outDir = '.'
    print("Saving the RMS noise spectrum in an ASCII file:")
    outFile = outDir + "/" + prefixOut + "Inoise.dat"
    print("> %s" % outFile)
    np.savetxt(outFile, rmsArr)

    # Save FITS files containing sky and source masks
    print("Saving sky and source mask images:")
    mskArr = np.where(mskSky > 0, 1.0, np.nan)
    headMsk = strip_fits_dims(header=headI, minDim=2)
    headMsk["DATAMAX"] = 1
    headMsk["DATAMIN"] = 0
    del headMsk["BUNIT"]
    fitsFileOut = outDir + "/" + prefixOut + "IskyMask.fits"
    print("> %s" % fitsFileOut)
    pf.writeto(fitsFileOut, mskArr, headMsk, output_verify="fix",
               overwrite=True)
    mskArr = np.where(mskSrc > 0, 1.0, np.nan)
    fitsFileOut = outDir + "/" + prefixOut + "IsrcMask.fits"
    print("> %s" % fitsFileOut)
    pf.writeto(fitsFileOut, mskArr, headMsk, output_verify="fix",
               overwrite=True)

    # Create a blank FITS file on disk using the large file method
    # http://docs.astropy.org/en/stable/io/fits/appendix/faq.html
    # #how-can-i-create-a-very-large-fits-file-from-scratch
    fitsModelFile = outDir + "/" + prefixOut + "Imodel.fits"
    print("Creating an empty FITS file on disk")
    print("> %s" % fitsModelFile)
    stub = np.zeros((10, 10, 10), dtype=dtFloat)
    hdu = pf.PrimaryHDU(data=stub)
    headModel = strip_fits_dims(header=headI, minDim=nDim)
    headModel["NAXIS1"] = headI["NAXIS1"]
    headModel["NAXIS2"] = headI["NAXIS2"]
    headModel["NAXIS3"] = headI["NAXIS3"]
    nVoxels = headI["NAXIS1"] * headI["NAXIS2"] * headI["NAXIS3"]
    if nDim == 4:
        headModel["NAXIS4"] = headI["NAXIS4"]
        nVoxels *= headI["NAXIS4"]
    while len(headModel) < (36 * 4 - 1):
        headModel.append()
    headModel.tofile(fitsModelFile, overwrite=True)
    with open(fitsModelFile, "rb+") as f:
        f.seek(len(headModel.tostring()) + (nVoxels * int(nBits / 8)) - 1)
        f.write(b"\0")

    # Feedback to user
    srcIdx = np.where(mskSrc > 0)
    srcCoords = np.rot90(np.where(mskSrc > 0))
    nPix = mskSrc.shape[-1] * mskSrc.shape[-2]
    nDetectPix = len(srcCoords)
    if verbose:
        print("Emission present in %d spectra (%.1f percent)." %
              (nDetectPix, (nDetectPix * 100.0 / nPix)))

    # Inform the user of the job magnitude
    startTime = time.time()
    print("Fitting %d/%d spectra." % (nDetectPix, nPix))
    j = 0
    nFailPix = 0
    if verbose:
        progress(40, 0)

    # Loop through columns of pixels (buffers disk IO)
    for i in range(0, headI["NAXIS1"], buffCols):

        # Select the relevant pixel columns from the mask and cube
        mskSub = mskSrc[:, i:i + buffCols]
        srcCoords = np.rot90(np.where(mskSub > 0))

        # Select the relevant pixel columns from the mask
        HDULst = pf.open(fitsI, "readonly", memmap=True)
        if nDim == 3:
            IArr = HDULst[0].data[:, :, i:i + buffCols]
        elif nDim == 4 and freq_axis == 3:
            IArr = HDULst[0].data[0, :, :, i:i + buffCols]
        elif nDim == 4 and freq_axis == 4:
            IArr = HDULst[0].data[:, 0, :, i:i + buffCols]
        HDULst.close()
        IModArr = np.ones_like(IArr, dtype=dtFloat) * medSky

        # Fit the spectra in turn
        for yi, xi in srcCoords:
            j += 1
            if verbose:
                progress(40, ((j) * 100.0 / nDetectPix))

            # Fit a <=5th order polynomial model to the Stokes I spectrum
            # Frequency axis must be in GHz to avoid overflow errors
            fitDict = {
                "fitStatus": 0,
                "chiSq": 0.0,
                "dof": len(freqArr_GHz) - polyOrd - 1,
                "chiSqRed": 0.0,
                "nIter": 0,
                "p": None
            }
            try:
                mp = fit_spec_poly5(freqArr_GHz, IArr[:, yi, xi], rmsArr,
                                    polyOrd)
                fitDict["p"] = mp.params
                fitDict["fitStatus"] = mp.status
                fitDict["chiSq"] = mp.fnorm
                fitDict["chiSqRed"] = mp.fnorm / fitDict["dof"]
                fitDict["nIter"] = mp.niter
                IModArr[:, yi, xi] = poly5(fitDict["p"])(freqArr_GHz)
            except Exception:
                nFailPix += 1
                if debug:
                    print("\nTRACEBACK:")
                    print("-" * 80)
                    print(traceback.format_exc())
                    print("-" * 80)
                    print()
                    print("> Setting Stokes I spectrum to NaN.\n")
                fitDict["p"] = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
                IModArr[:, yi, xi] = np.ones_like(IArr[:, yi, xi]) * np.nan

        # Write the spectrum to the model file
        HDULst = pf.open(fitsModelFile, "update", memmap=True)
        if nDim == 3:
            HDULst[0].data[:, :, i:i + buffCols] = IModArr
        elif nDim == 4 and freq_axis == 3:
            HDULst[0].data[0, :, :, i:i + buffCols] = IModArr
        elif nDim == 4 and freq_axis == 4:
            HDULst[0].data[:, 0, :, i:i + buffCols] = IModArr
        HDULst.close()

    endTime = time.time()
    cputime = (endTime - startTime)
    print("Fitting completed in %.2f seconds." % cputime)
    if nFailPix > 0:
        print("Warn: Fitting failed on %d/%d spectra (%.1f percent)." %
              (nFailPix, nDetectPix, (nFailPix * 100.0 / nDetectPix)))
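#-------------------------------------------------------------------------#
# Illustrative usage sketch (assumption, not part of the original pipeline):
# one plausible call to make_model_I() above. The file names are hypothetical
# placeholders; outputs land in the current directory because outDir defaults
# to "".
def _example_make_model_I():
    """Hedged sketch: fit a Stokes I model cube from a FITS cube on disk."""
    make_model_I(fitsI="StokesI_cube.fits",   # hypothetical input cube
                 freqFile="freqs_Hz.dat",     # ASCII list, one freq [Hz] per row
                 polyOrd=2,                   # poly5() supports orders <= 5
                 cutoff=-5,                   # negative => 5-sigma threshold
                 prefixOut="example_",
                 verbose=True)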
def run_qufit(dataFile, modelNum, outDir="", polyOrd=3, nBits=32, noStokesI=False, showPlots=False, debug=False, verbose=False): """Function controlling the fitting procedure.""" # Get the processing environment if mpiSwitch: mpiComm = MPI.COMM_WORLD mpiSize = mpiComm.Get_size() mpiRank = mpiComm.Get_rank() else: mpiSize = 1 mpiRank = 0 # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2 * nBits) # Output prefix is derived from the input file name prefixOut, ext = os.path.splitext(dataFile) nestOut = prefixOut + "_nest/" if mpiRank == 0: if os.path.exists(nestOut): shutil.rmtree(nestOut, True) os.mkdir(nestOut) if mpiSwitch: mpiComm.Barrier() # Read the data file in the root process if mpiRank == 0: dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat) else: dataArr = None if mpiSwitch: dataArr = mpiComm.bcast(dataArr, root=0) # Parse the data array # freq_Hz, I, Q, U, dI, dQ, dU try: (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr if mpiRank == 0: print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]") except Exception: # freq_Hz, Q, U, dQ, dU try: (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr if mpiRank == 0: print("\nFormat [freq_Hz, Q, U, dQ, dU]") noStokesI = True except Exception: print("\nError: Failed to parse data file!") if debug: print(traceback.format_exc()) if mpiSwitch: MPI.Finalize() return # If no Stokes I present, create a dummy spectrum = unity if noStokesI: if mpiRank == 0: print("Note: no Stokes I data - assuming fractional polarisation.") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to GHz for convenience freqArr_GHz = freqArr_Hz / 1e9 lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0) # Fit the Stokes I spectrum and create the fractional spectra if mpiRank == 0: dataArr = create_frac_spectra(freqArr=freqArr_GHz, IArr=IArr, QArr=QArr, UArr=UArr, dIArr=dIArr, dQArr=dQArr, dUArr=dUArr, polyOrd=polyOrd, verbose=True) else: dataArr = None if mpiSwitch: dataArr = mpiComm.bcast(dataArr, root=0) (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr # Plot the data and the Stokes I model fit if mpiRank == 0: print("Plotting the input data and spectral index fit.") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9) specFig = plt.figure(figsize=(10, 6)) plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr=IArr, qArr=qArr, uArr=uArr, dIArr=dIArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr=IModHirArr, fig=specFig) # Use the custom navigation toolbar try: specFig.canvas.toolbar.pack_forget() CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) except Exception: pass # Display the figure if showPlots: specFig.canvas.draw() specFig.show() #-------------------------------------------------------------------------# # Load the model and parameters from the relevant file if mpiSwitch: mpiComm.Barrier() if mpiRank == 0: print("\nLoading the model from 'models_ns/m%d.py' ..." 
% modelNum) mod = imp.load_source("m%d" % modelNum, "models_ns/m%d.py" % modelNum) global model model = mod.model # Let's time the sampler if mpiRank == 0: startTime = time.time() # Unpack the inParms structure parNames = [x["parname"] for x in mod.inParms] labels = [x["label"] for x in mod.inParms] values = [x["value"] for x in mod.inParms] bounds = [x["bounds"] for x in mod.inParms] priorTypes = [x["priortype"] for x in mod.inParms] wraps = [x["wrap"] for x in mod.inParms] nDim = len(priorTypes) fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes] nFree = sum(fixedMsk) # Set the prior function given the bounds of each parameter prior = prior_call(priorTypes, bounds, values) # Set the likelihood function given the data lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr) # Let's time the sampler if mpiRank == 0: startTime = time.time() # Run nested sampling using PyMultiNest nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict) nestArgsDict["n_params"] = nDim nestArgsDict["n_dims"] = nDim nestArgsDict["outputfiles_basename"] = nestOut nestArgsDict["LogLikelihood"] = lnlike nestArgsDict["Prior"] = prior pmn.run(**nestArgsDict) # Do the post-processing on one processor if mpiSwitch: mpiComm.Barrier() if mpiRank == 0: # Query the analyser object for results aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut) statDict = aObj.get_stats() fitDict = aObj.get_best_fit() endTime = time.time() # NOTE: The Analyser methods do not work well for parameters with # posteriors that overlap the wrap value. Use np.percentile instead. pMed = [None] * nDim for i in range(nDim): pMed[i] = statDict["marginals"][i]['median'] lnLike = fitDict["log_likelihood"] lnEvidence = statDict["nested sampling global log-evidence"] dLnEvidence = statDict["nested sampling global log-evidence error"] # Get the best-fitting values & uncertainties directly from chains chains = aObj.get_equal_weighted_posterior() chains = wrap_chains(chains, wraps, bounds, pMed) p = [None] * nDim errPlus = [None] * nDim errMinus = [None] * nDim g = lambda v: (v[1], v[2] - v[1], v[1] - v[0]) for i in range(nDim): p[i], errPlus[i], errMinus[i] = \ g(np.percentile(chains[:, i], [15.72, 50, 84.27])) # Calculate goodness-of-fit parameters nData = 2.0 * len(lamSqArr_m2) dof = nData - nFree - 1 chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr) chiSqRed = chiSq / dof AIC = 2.0 * nFree - 2.0 * lnLike AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike BIC = nFree * np.log(nData) - 2.0 * lnLike # Summary of run print("") print("-" * 80) print("SUMMARY OF SAMPLING RUN:") print("#-PROCESSORS = %d" % mpiSize) print("RUN-TIME = %.2f" % (endTime - startTime)) print("DOF = %d" % dof) print("CHISQ: = %.3g" % chiSq) print("CHISQ RED = %.3g" % chiSqRed) print("AIC: = %.3g" % AIC) print("AICc = %.3g" % AICc) print("BIC = %.3g" % BIC) print("ln(EVIDENCE) = %.3g" % lnEvidence) print("dLn(EVIDENCE) = %.3g" % dLnEvidence) print("") print("-" * 80) print("RESULTS:\n") for i in range(len(p)): print("%s = %.4g (+%3g, -%3g)" % \ (parNames[i], p[i], errPlus[i], errMinus[i])) print("-" * 80) print("") # Create a save dictionary and store final p in values outFile = prefixOut + "_m%d_nest.json" % modelNum IfitDict["p"] = toscalar(IfitDict["p"].tolist()) saveDict = { "parNames": toscalar(parNames), "labels": toscalar(labels), "values": toscalar(p), "errPlus": toscalar(errPlus), "errMinus": toscalar(errMinus), "bounds": toscalar(bounds), "priorTypes": toscalar(priorTypes), "wraps": 
toscalar(wraps), "dof": toscalar(dof), "chiSq": toscalar(chiSq), "chiSqRed": toscalar(chiSqRed), "AIC": toscalar(AIC), "AICc": toscalar(AICc), "BIC": toscalar(BIC), "IfitDict": IfitDict } json.dump(saveDict, open(outFile, "w")) print("Results saved in JSON format to:\n '%s'\n" % outFile) # Plot the data and best-fitting model lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000) freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2) IModArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9) pDict = {k: v for k, v in zip(parNames, p)} quModArr = model(pDict, lamSqHirArr_m2) specFig.clf() plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr=IArr, qArr=qArr, uArr=uArr, dIArr=dIArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr=IModArr, qModArr=quModArr.real, uModArr=quModArr.imag, fig=specFig) specFig.canvas.draw() # Plot the posterior samples in a corner plot chains = aObj.get_equal_weighted_posterior() chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim] iFixed = [i for i, e in enumerate(fixedMsk) if e == 0] chains = np.delete(chains, iFixed, 1) for i in sorted(iFixed, reverse=True): del (labels[i]) del (p[i]) cornerFig = corner.corner(xs=chains, labels=labels, range=[0.99999] * nFree, truths=p, quantiles=[0.1572, 0.8427], bins=30) # Save the figures outFile = nestOut + "fig_m%d_specfit.pdf" % modelNum specFig.savefig(outFile) print("Plot of best-fitting model saved to:\n '%s'\n" % outFile) outFile = nestOut + "fig_m%d_corner.pdf" % modelNum cornerFig.savefig(outFile) print("Plot of posterior samples saved to \n '%s'\n" % outFile) # Display the figures if showPlots: specFig.show() cornerFig.show() print("> Press <RETURN> to exit ...", end="") sys.stdout.flush() input() # Clean up plt.close(specFig) plt.close(cornerFig) # Clean up MPI environment if mpiSwitch: MPI.Finalize()
def run_rmsynth(dataFile, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType="variance", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False): """ Read the I, Q & U data from the ASCII file and run RM-synthesis. """ # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2 * nBits) # Output prefix is derived from the input file name prefixOut, ext = os.path.splitext(dataFile) # Read the data-file. Format=space-delimited, comments="#". print "Reading the data file '%s':" % dataFile # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy try: print "> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]", (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy, dIArr_Jy, dQArr_Jy, dUArr_Jy) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) print "... success." except Exception: print "...failed." # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy try: print "> Trying [freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy]", (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) print "... success." noStokesI = True except Exception: print "...failed." if debug: print traceback.format_exc() sys.exit() print "Successfully read in the Stokes spectra." # If no Stokes I present, create a dummy spectrum = unity if noStokesI: print "Warn: no Stokes I data in use." IArr_Jy = np.ones_like(QArr_Jy) dIArr_Jy = np.zeros_like(QArr_Jy) # Convert to GHz and mJy for convenience freqArr_GHz = freqArr_Hz / 1e9 IArr_mJy = IArr_Jy * 1e3 QArr_mJy = QArr_Jy * 1e3 UArr_mJy = UArr_Jy * 1e3 dIArr_mJy = dIArr_Jy * 1e3 dQArr_mJy = dQArr_Jy * 1e3 dUArr_mJy = dUArr_Jy * 1e3 dQUArr_mJy = (dQArr_mJy + dUArr_mJy) / 2.0 dQUArr_Jy = dQUArr_mJy / 1e3 # Fit the Stokes I spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr_mJy, QArr = QArr_mJy, UArr = UArr_mJy, dIArr = dIArr_mJy, dQArr = dQArr_mJy, dUArr = dUArr_mJy, polyOrd = polyOrd, verbose = True, debug = debug) # Plot the data and the Stokes I model fit if showPlots: print "Plotting the input data and spectral index fit." 
freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz / 1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr_mJy=IArr_mJy, qArr=qArr, uArr=uArr, dIArr_mJy=dIArr_mJy, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr_mJy=IModHirArr_mJy, fig=specFig) # Use the custom navigation toolbar (does not work on Mac OS X) try: specFig.canvas.toolbar.pack_forget() CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) except Exception: pass # Display the figure specFig.show() # DEBUG (plot the Q, U and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz / 1e9, dQUArr_mJy, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz / 1e9, dQArr_mJy, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz / 1e9, dUArr_mJy, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9 ax.set_xlim( np.min(freqArr_Hz) / 1e9 - xRange * 0.05, np.max(freqArr_Hz) / 1e9 + xRange * 0.05) ax.set_xlabel('$\\nu$ (GHz)') ax.set_ylabel('RMS (mJy bm$^{-1}$)') ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra") rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2)) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, 600.0) # Force the minimum phiMax # Faraday depth sampling. Zero always centred on middle channel nChanRM = round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0 startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) print "PhiArr = %.2f to %.2f by %.2f (%d chans)." % ( phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM) # Calculate the weighting as 1/sigma^2 or all 1s (natural) if weightType == "variance": weightArr = 1.0 / np.power(dQUArr_mJy, 2.0) else: weightType = "natural" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) print "Weight type is '%s'." 
% weightType startTime = time.time() # Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr, dataU=uArr, lambdaSqArr_m2=lambdaSqArr_m2, phiArr_radm2=phiArr_radm2, weightArr=weightArr, nBits=nBits, verbose=True) # Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr = np.isnan(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits = nBits, verbose = True) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime - startTime) print "> RM-synthesis completed in %.2f seconds." % cputime # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz / 1e9) dirtyFDF *= (Ifreq0_mJybm / 1e3) # FDF is in Jy # Calculate the theoretical noise in the FDF dFDFth_Jybm = np.sqrt(1. / np.sum(1. / dQUArr_Jy**2.)) # Measure the parameters of the dirty FDF # Use the theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF=dirtyFDF, phiArr=phiArr_radm2, fwhmRMSF=fwhmRMSF, dFDF=dFDFth_Jybm, lamSqArr_m2=lambdaSqArr_m2, lam0Sq=lam0Sq_m2) mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm) mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]]) mDict["IfitStat"] = fitDict["fitStatus"] mDict["IfitChiSqRed"] = fitDict["chiSqRed"] mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2) mDict["freq0_Hz"] = toscalar(freq0_Hz) mDict["fwhmRMSF"] = toscalar(fwhmRMSF) mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy)) mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm) # Measure the complexity of the q and u spectra mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"] / (Ifreq0_mJybm / 1e3) mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz, qArr=qArr, uArr=uArr, dqArr=dqArr, duArr=duArr, fracPol=mDict["fracPol"], psi0_deg=mDict["polAngle0Fit_deg"], RM_radm2=mDict["phiPeakPIfit_rm2"]) mDict.update(mD) # Debugging plots for spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD["xArrQ"], qArr=pD["yArrQ"], dqArr=pD["dyArrQ"], sigmaAddqArr=pD["sigmaAddArrQ"], chiSqRedqArr=pD["chiSqRedArrQ"], probqArr=pD["probArrQ"], uArr=pD["yArrU"], duArr=pD["dyArrU"], sigmaAdduArr=pD["sigmaAddArrU"], chiSqReduArr=pD["chiSqRedArrU"], probuArr=pD["probArrU"], mDict=mDict) tmpFig.show() # Save the dirty FDF, RMSF and weight array to ASCII files print "Saving the dirty FDF, RMSF weight arrays to ASCII files." outFile = prefixOut + "_FDFdirty.dat" print "> %s" % outFile np.savetxt(outFile, zip(phiArr_radm2, dirtyFDF.real, dirtyFDF.imag)) outFile = prefixOut + "_RMSF.dat" print "> %s" % outFile np.savetxt(outFile, zip(phi2Arr_radm2, RMSFArr.real, RMSFArr.imag)) outFile = prefixOut + "_weight.dat" print "> %s" % outFile np.savetxt(outFile, zip(freqArr_Hz, weightArr)) # Save the measurements to a "key=value" text file print "Saving the measurements on the FDF in 'key=val' and JSON formats." 
outFile = prefixOut + "_RMsynth.dat" print "> %s" % outFile FH = open(outFile, "w") for k, v in mDict.iteritems(): FH.write("%s=%s\n" % (k, v)) FH.close() outFile = prefixOut + "_RMsynth.json" print "> %s" % outFile json.dump(dict(mDict), open(outFile, "w")) # Print the results to the screen print print '-' * 80 print 'RESULTS:\n' print 'FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]) print 'Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]) print 'Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]) print 'Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]) print 'freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9) print 'I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"]) print 'Peak PI = %.4g (+/-%.4g) mJy/beam' % ( mDict["ampPeakPIfit_Jybm"] * 1e3, mDict["dAmpPeakPIfit_Jybm"] * 1e3) print 'QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"] * 1e3) print 'FDF Noise (theory) = %.4g mJy/beam' % (mDict["dFDFth_Jybm"] * 1e3) print 'FDF SNR = %.4g ' % (mDict["snrPIfit"]) print 'sigma_add(q) = %.4g (+%.4g, -%.4g)' % ( mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"], mDict["dSigmaAddMinusQ"]) print 'sigma_add(u) = %.4g (+%.4g, -%.4g)' % ( mDict["sigmaAddU"], mDict["dSigmaAddPlusU"], mDict["dSigmaAddMinusU"]) print print '-' * 80 # Plot the RM Spread Function and dirty FDF if showPlots: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr=phiArr_radm2, FDF=dirtyFDF, phi2Arr=phi2Arr_radm2, RMSFArr=RMSFArr, fwhmRMSF=fwhmRMSF, vLine=mDict["phiPeakPIfit_rm2"], fig=fdfFig) # Use the custom navigation toolbar try: fdfFig.canvas.toolbar.pack_forget() CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) except Exception: pass # Display the figure fdfFig.show() # Pause if plotting enabled if showPlots or debug: print "Press <RETURN> to exit ...", raw_input()
def run_qufit(dataFile, modelNum, nWalkers=200, nThreads=2, outDir="", polyOrd=3, nBits=32, noStokesI=False, showPlots=False, debug=False): """Root function controlling the fitting procedure.""" # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2 * nBits) # Output prefix is derived from the input file name prefixOut, ext = os.path.splitext(dataFile) # Read the data-file. Format=space-delimited, comments='#'. print "Reading the data file '%s':" % dataFile # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy try: print "> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]", (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy, dIArr_Jy, dQArr_Jy, dUArr_Jy) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) print "... success." except Exception: print "...failed." # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy try: print "Reading [freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy]", (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) print "... success." noStokesI = True except Exception: print "...failed." if debug: print traceback.format_exc() sys.exit() # If no Stokes I present, create a dummy spectrum = unity if noStokesI: print "Warn: no Stokes I data in use." IArr_Jy = np.ones_like(QArr_Jy) dIArr_Jy = np.zeros_like(QArr_Jy) # Convert to GHz and mJy for convenience print "Successfully read in the Stokes spectra." freqArr_GHz = freqArr_Hz / 1e9 lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0) IArr_mJy = IArr_Jy * 1e3 QArr_mJy = QArr_Jy * 1e3 UArr_mJy = UArr_Jy * 1e3 dIArr_mJy = dIArr_Jy * 1e3 dQArr_mJy = dQArr_Jy * 1e3 dUArr_mJy = dUArr_Jy * 1e3 # Fit the Stokes I spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, IfitDict = \ create_frac_spectra(freqArr=freqArr_GHz, IArr=IArr_mJy, QArr=QArr_mJy, UArr=UArr_mJy, dIArr=dIArr_mJy, dQArr=dQArr_mJy, dUArr=dUArr_mJy, polyOrd=polyOrd, verbose=True) # Plot the data and the Stokes I model fit if showPlots: print "Plotting the input data and spectral index fit." freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr_mJy = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9) specFig = plt.figure(figsize=(12, 8)) plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr_mJy=IArr_mJy, qArr=qArr, uArr=uArr, dIArr_mJy=dIArr_mJy, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr_mJy=IModHirArr_mJy, fig=specFig) # Use the custom navigation toolbar try: specFig.canvas.toolbar.pack_forget() CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) except Exception: pass # Display the figure specFig.canvas.draw() specFig.show() #-------------------------------------------------------------------------# # Load the model and parameters from the relevant file print "\nLoading the model from file 'models_mc/m%d.py' ..." % modelNum mod = imp.load_source("m%d" % modelNum, "models_mc/m%d.py" % modelNum) global model model = mod.model # Select the inputs to the chosen model by creating an instance of # inParmClass. Seed walker vectors based on the preset seed-range. ip = inParmClass(mod.inParms, mod.runParmDict) p0 = ip.seed_walkers(nWalkers) # Call the lnlike_total function to test it works OK print "> Calling ln(likelihood) as a test: L = ", L = lnlike_total(p0[0], ip, lamSqArr_m2, qArr, dqArr, uArr, duArr) print L if np.isnan(L): print "> Err: ln(likelihood) function returned NaN." sys.exit() # Define an MCMC sampler object. 3rd argument is ln(likelihood) function # and 4th is a list of additional arguments to lnlike() after walker. 
sampler = emcee.EnsembleSampler( nWalkers, ip.nDim, lnlike_total, args=[ip, lamSqArr_m2, qArr, dqArr, uArr, duArr], threads=nThreads) # Initialise the trace figure if showPlots: chainFigLst = [] for i in range(ip.nDim): chainFigLst.append(plt.figure(figsize=(8, 8))) # Run the sampler to explore parameter space print 'Explore parameter space for %d steps ...' % ip.nExploreSteps, sys.stdout.flush() pos, prob, state = sampler.run_mcmc(p0, ip.nExploreSteps) print 'done.' # Reset the samplers to a small range around the max(likelihood) maxPos = pos[np.argmax(prob, 0)] pos = [maxPos + 1e-9 * np.random.rand(ip.nDim) for i in range(nWalkers)] # Plot the chains for the exploration step if showPlots: print 'Plotting the walker chains for the wide exploration step.' titleStr = "Exploring all likely parameter space." plot_trace(sampler, ip.inParms, title=titleStr) sampler.reset() # Initialise the structure for holding the binned statistics # List of (list of dictionaries) statLst = [] for i in range(ip.nDim): statLst.append({ "stepBin": [], "medBin": [], "stdBin": [], "medAll": [], "stdAll": [], "B": [], "W": [], "R": [], "stat1": [], "stat2": [] }) likeStatDict = { "stepBin": [], "medBin": [], "stdBin": [], "stat1": [], "stat2": [] } # Run the sampler, polling the statistics every nPollSteps print "Running the sampler and polling every %d steps:" % (ip.nPollSteps) if ip.runMode == "auto": print "> Will attempt to detect MCMC chain stability." print "Maximum steps set to %d." % ip.maxSteps print "" while True: convergeFlg = False convergeFlgLst = [] print ".", sys.stdout.flush() # Run the sampler for nPollSteps pos, prob, state = sampler.run_mcmc(pos, ip.nPollSteps) # Perform wrapping if ip.inParms[n]['wrap'] is set. sampler, pos = wrap_chains(ip.inParms, sampler, pos, shift=True) # Measure the statistics of the binned likelihood stepBin = sampler.chain.shape[1] - (ip.nPollSteps / 2.0) likeWin = sampler.lnprobability[:, -ip.nPollSteps:] likeStatDict["stepBin"].append(stepBin) likeStatDict["medBin"].append(np.median(likeWin)) likeStatDict["stdBin"].append(np.std(likeWin)) # Measure the statistics of the binned chains chainWin = sampler.chain[:, -ip.nPollSteps:, :] for i in range(ip.nDim): mDict = gelman_rubin(chainWin[:, :, i]) statLst[i]["stepBin"].append(stepBin) statLst[i]["medBin"].append(np.median(chainWin[:, :, i])) statLst[i]["stdBin"].append(np.std(chainWin[:, :, i])) statLst[i]["medAll"].append(mDict["medAll"]) statLst[i]["stdAll"].append(mDict["stdAll"]) statLst[i]["B"].append(mDict["B"]) statLst[i]["W"].append(mDict["W"]) statLst[i]["R"].append(mDict["R"]) # Check for convergence in each parameter trace convergeFlg, stat1, stat2 = \ chk_trace_stable(statDict=statLst[i], nCycles=ip.nStableCycles, stdLim=ip.parmStdLim, medLim=ip.parmMedLim) convergeFlgLst.append(convergeFlg) statLst[i]["stat1"].append(stat1) statLst[i]["stat2"].append(stat2) # Check for convergence in the likelihood trace convergeFlg, stat1, stat2 = \ chk_trace_stable(statDict=likeStatDict, nCycles=ip.nStableCycles, stdLim=ip.likeStdLim, medLim=ip.likeMedLim) convergeFlgLst.append(convergeFlg) likeStatDict["stat1"].append(stat1) likeStatDict["stat2"].append(stat2) # If all traces have converged, continue if ip.runMode == "auto" and np.all(convergeFlgLst): print "\n>Stability threshold passed!" break # Continue at the upper step limit if sampler.chain.shape[1] > ip.maxSteps: print "\nMaximum number of steps performed." 
break # Plot the likelihood trace and statistics if debug: plot_like_stats(likeStatDict) if not showPlots: print "Press <RETURN> ...", raw_input() # Discard the burn-in section of the chain print "\nUsing the last %d steps to sample the posterior.\n" % ip.nSteps chainCut = sampler.chain[:, -ip.nSteps:, :] s = chainCut.shape flatChainCut = chainCut.reshape(s[0] * s[1], s[2]) lnprobCut = sampler.lnprobability[-ip.nSteps:, :] flatLnprobCut = lnprobCut.flatten() # Plot the chains if showPlots: print 'Plotting the walker chains after polling ...' plot_trace_stats(sampler, ip.inParms, figLst=chainFigLst, nSteps=ip.nSteps, statLst=statLst) # Determine the best-fit values from the 16th, 50th and 84th percentile # Marginalizing in MCMC is simple: select the axis of the parameter. # Update ip.inParms with the best-fitting values. pBest = [] print print '-' * 80 print 'RESULTS:\n' for i in range(len(ip.fxi)): fChain = flatChainCut[:, i] g = lambda v: (v[1], v[2] - v[1], v[1] - v[0]) best, errPlus, errMinus = g(np.percentile(fChain, [15.72, 50, 84.27])) pBest.append(best) ip.inParms[ip.fxi[i]]['value'] = best ip.inParms[ip.fxi[i]]['errPlus'] = errPlus ip.inParms[ip.fxi[i]]['errMinus'] = errMinus print '%s = %.4g (+%3g, -%3g)' % (ip.inParms[ip.fxi[i]]['parname'], best, errPlus, errMinus) # Calculate goodness-of-fit parameters nData = 2.0 * len(lamSqArr_m2) dof = nData - ip.nDim - 1 chiSq = chisq_model(ip.inParms, lamSqArr_m2, qArr, dqArr, uArr, duArr) chiSqRed = chiSq / dof # Calculate the information criteria lnLike = lnlike_model(ip.inParms, lamSqArr_m2, qArr, dqArr, uArr, duArr) AIC = 2.0 * ip.nDim - 2.0 * lnLike AICc = 2.0 * ip.nDim * (ip.nDim + 1) / (nData - ip.nDim - 1) - 2.0 * lnLike BIC = ip.nDim * np.log(nData) - 2.0 * lnLike print print "DOF:", dof print "CHISQ:", chiSq print "CHISQ RED:", chiSqRed print "AIC:", AIC print "AICc", AICc print "BIC", BIC print print '-' * 80 # Create a save dictionary saveObj = { "inParms": ip.inParms, "flatchain": flatChainCut, "flatlnprob": flatLnprobCut, "chain": chainCut, "lnprob": lnprobCut, "convergeFlg": np.all(convergeFlgLst), "dof": dof, "chiSq": chiSq, "chiSqRed": chiSqRed, "AIC": AIC, "AICc": AICc, "BIC": BIC, "IfitDict": IfitDict } # Save the Markov chain and results to a Python Pickle outFile = prefixOut + "_MCMC.pkl" if os.path.exists(outFile): os.remove(outFile) fh = open(outFile, "wb") pkl.dump(saveObj, fh) fh.close() print "> Results and MCMC chains saved in pickle file '%s'" % outFile # Plot the results if showPlots: print "Plotting the best-fitting model." lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000) freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2) IModArr_mJy = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9) quModArr = model(ip.inParms, lamSqHirArr_m2) specFig.clf() plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr_mJy=IArr_mJy, qArr=qArr, uArr=uArr, dIArr_mJy=dIArr_mJy, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr_mJy=IModArr_mJy, qModArr=quModArr.real, uModArr=quModArr.imag, fig=specFig) specFig.canvas.draw() print "> Press <RETURN> to exit ...", raw_input()
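#-------------------------------------------------------------------------#
# Illustrative usage sketch (assumption, not part of the original pipeline):
# one plausible call to the emcee-based run_qufit() above. The data file and
# model number are hypothetical placeholders; 'models_mc/m1.py' must define
# 'model', 'inParms' and 'runParmDict'.
def _example_run_qufit_mcmc():
    """Hedged sketch: MCMC QU fit of an ASCII spectrum."""
    run_qufit(dataFile="source1.dat",
              modelNum=1,       # loads models_mc/m1.py
              nWalkers=200,
              nThreads=4,
              polyOrd=2,
              showPlots=False)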
def run_qufit( data, modelName, IMod=None, polyOrd=3, nBits=32, verbose=False, diagnostic_plots=True, values=None, bounds=None, ): """Function for Nested sampling fitting of Stokes parameters""" if mpiSwitch: # Get the processing environment mpiComm = MPI.COMM_WORLD mpiSize = mpiComm.Get_size() mpiRank = mpiComm.Get_rank() else: mpiSize = 1 mpiRank = 0 # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2 * nBits) if isinstance(diagnostic_plots, str): outDir = diagnostic_plots else: # outDir=os.path.expanduser("~") outDir = "/tmp" nestOut = outDir + "/QUfit_nest/" if mpiRank == 0: if os.path.exists(nestOut): shutil.rmtree(nestOut, True) os.mkdir(nestOut) if mpiSwitch: mpiComm.Barrier() # Read the data file in the root process if mpiRank == 0: dataArr = data.copy() if mpiSwitch: dataArr = mpiComm.bcast(dataArr, root=0) # Parse the data array # freq_Hz, I, Q, U, V, dI, dQ, dU, dV try: (freqArr_Hz, IArr, QArr, UArr, VArr, dIArr, dQArr, dUArr, dVArr) = dataArr if mpiRank == 0: print("\nFormat [freq_Hz, I, Q, U, V, dI, dQ, dU, dV]") except Exception: print("pass data in format: [freq_Hz, I, Q, U, V, dI, dQ, dU, dV]") return # Convert to GHz for convenience freqArr_GHz = freqArr_Hz / 1e9 lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0) # Fit the Stokes I spectrum and create the fractional spectra if mpiRank == 0: if IMod is None: dataArr = create_frac_spectra_test( freqArr=freqArr_GHz, IArr=IArr, QArr=QArr, UArr=UArr, dIArr=dIArr, dQArr=dQArr, dUArr=dUArr, VArr=VArr, dVArr=dVArr, polyOrd=polyOrd, IModArr=None, verbose=True, ) else: dataArr = create_frac_spectra_test( freqArr=freqArr_GHz, IArr=IArr, QArr=QArr, UArr=UArr, dIArr=dIArr, dQArr=dQArr, dUArr=dUArr, VArr=VArr, dVArr=dVArr, polyOrd=polyOrd, IModArr=IMod(freqArr_Hz), verbose=True, ) else: dataArr = None if mpiSwitch: dataArr = mpiComm.bcast(dataArr, root=0) (IModArr, qArr, uArr, vArr, dqArr, duArr, dvArr, IfitDict) = dataArr # -------------------------------------------------------------------------# # Load the model and parameters from the relevant file if mpiSwitch: mpiComm.Barrier() global model model = models.get_model(modelName) inParms = models.get_params(modelName) # Let's time the sampler if mpiRank == 0: startTime = time.time() # Unpack the inParms structure parNames = [x["parname"] for x in inParms] labels = [x["label"] for x in inParms] if values is None: values = [x["value"] for x in inParms] if bounds is None: bounds = [x["bounds"] for x in inParms] priorTypes = [x["priortype"] for x in inParms] wraps = [x["wrap"] for x in inParms] nDim = len(priorTypes) fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes] nFree = sum(fixedMsk) # Set the prior function given the bounds of each parameter prior = prior_call(priorTypes, bounds, values) # Set the likelihood function given the data lnlike = lnlike_call( parNames, lamSqArr_m2, QArr, dQArr, UArr, dUArr, VArr, dVArr, IModArr ) # Let's time the sampler if mpiRank == 0: startTime = time.time() # Run nested sampling using PyMultiNest nestArgsDict = merge_two_dicts(init_mnest(), models.nestArgsDict) nestArgsDict["n_params"] = nDim nestArgsDict["n_dims"] = nDim nestArgsDict["outputfiles_basename"] = nestOut nestArgsDict["LogLikelihood"] = lnlike nestArgsDict["Prior"] = prior pmn.run(**nestArgsDict) # Do the post-processing on one processor if mpiSwitch: mpiComm.Barrier() if mpiRank == 0: # Query the analyser object for results aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut) statDict = aObj.get_stats() fitDict = aObj.get_best_fit() endTime = 
time.time() # NOTE: The Analyser methods do not work well for parameters with # posteriors that overlap the wrap value. Use np.percentile instead. pMed = [None] * nDim for i in range(nDim): pMed[i] = statDict["marginals"][i]["median"] lnLike = fitDict["log_likelihood"] lnEvidence = statDict["nested sampling global log-evidence"] dLnEvidence = statDict["nested sampling global log-evidence error"] # Get the best-fitting values & uncertainties directly from chains chains = aObj.get_equal_weighted_posterior() chains = wrap_chains(chains, wraps, bounds, pMed) p = [None] * nDim errPlus = [None] * nDim errMinus = [None] * nDim g = lambda v: (v[1], v[2] - v[1], v[1] - v[0]) for i in range(nDim): p[i], errPlus[i], errMinus[i] = g( np.percentile(chains[:, i], [15.72, 50, 84.27]) ) # Calculate goodness-of-fit parameters nData = 2.0 * len(lamSqArr_m2) dof = nData - nFree - 1 chiSq = chisq_model( parNames, p, lamSqArr_m2, QArr, dQArr, UArr, dUArr, VArr, dVArr, IModArr ) chiSqRed = chiSq / dof AIC = 2.0 * nFree - 2.0 * lnLike AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike BIC = nFree * np.log(nData) - 2.0 * lnLike # Summary of run print("") print("-" * 80) print("SUMMARY OF SAMPLING RUN:") print("#-PROCESSORS = %d" % mpiSize) print("RUN-TIME = %.2f" % (endTime - startTime)) print("DOF = %d" % dof) print("CHISQ: = %.3g" % chiSq) print("CHISQ RED = %.3g" % chiSqRed) print("AIC: = %.3g" % AIC) print("AICc = %.3g" % AICc) print("BIC = %.3g" % BIC) print("ln(EVIDENCE) = %.3g" % lnEvidence) print("dLn(EVIDENCE) = %.3g" % dLnEvidence) print("") print("-" * 80) print("RESULTS:\n") for i in range(len(p)): print( "%s = %.4g (+%3g, -%3g)" % (parNames[i], p[i], errPlus[i], errMinus[i]) ) print("-" * 80) print("") # Create a save dictionary and store final p in values # outFile = nestOut + "m%d_nest.json" % modelNum outFile = nestOut + "%s_nest.json" % modelName IfitDict["p"] = toscalar(IfitDict["p"].tolist()) saveDict = { "parNames": toscalar(parNames), "labels": toscalar(labels), "values": toscalar(p), "errPlus": toscalar(errPlus), "errMinus": toscalar(errMinus), "bounds": toscalar(bounds), "priorTypes": toscalar(priorTypes), "wraps": toscalar(wraps), "dof": toscalar(dof), "chiSq": toscalar(chiSq), "chiSqRed": toscalar(chiSqRed), "AIC": toscalar(AIC), "AICc": toscalar(AICc), "BIC": toscalar(BIC), "IfitDict": IfitDict, } json.dump(saveDict, open(outFile, "w")) print("Results saved in JSON format to:\n '%s'\n" % outFile) # Plot the data and best-fitting model lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000) freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2) pDict = {k: v for k, v in zip(parNames, p)} if IMod: IModHirArr = IMod(freqHirArr_Hz) else: IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9) quModArr, vModArr = model(pDict, lamSqHirArr_m2, IModHirArr) specFig = plt.figure(figsize=(10, 6)) plot_pqu_spectra_chime( freqArr_Hz=freqArr_Hz, IArr=IArr, qArr=qArr, uArr=uArr, dIArr=dIArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr=IModHirArr, qModArr=quModArr.real, uModArr=quModArr.imag, fig=specFig, ) specFig.canvas.draw() # Plot the posterior samples in a corner plot chains = aObj.get_equal_weighted_posterior() chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim] iFixed = [i for i, e in enumerate(fixedMsk) if e == 0] chains = np.delete(chains, iFixed, 1) for i in sorted(iFixed, reverse=True): del labels[i] del p[i] cornerFig = corner.corner( xs=chains, labels=labels, range=[0.99999] * nFree, truths=p, quantiles=[0.1572, 0.8427], bins=30, ) # Plot the 
stokes Q vs. U (NEEDS WORK) qvsuFig = plot_q_vs_u_ax_chime( freqArr_Hz=freqArr_Hz, qArr=qArr, uArr=uArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, qModArr=quModArr.real / IModHirArr, uModArr=quModArr.imag / IModHirArr, ) if diagnostic_plots: if isinstance(diagnostic_plots, bool): qvsuFig.show() sys.stdout.flush() else: outFile = diagnostic_plots + "/fig_%s_specfit.pdf" % modelName specFig.savefig(outFile) print("Plot of best-fitting model saved to:\n '%s'\n" % outFile) outFile = diagnostic_plots + "/fig_%s_corner.pdf" % modelName cornerFig.savefig(outFile) print("Plot of posterior samples saved to \n '%s'\n" % outFile) outFile = diagnostic_plots + "/fig_%s_q_vs_u.pdf" % modelName qvsuFig.savefig(outFile) pol_prod = zip(p, errPlus, errMinus) return ( list(pol_prod), freqHirArr_Hz, qArr, uArr, vArr, dqArr, duArr, dvArr, IModArr, quModArr.real, quModArr.imag, vModArr, )
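#-------------------------------------------------------------------------#
# Illustrative usage sketch (assumption, not part of the original pipeline):
# one plausible call to the array-based, full-Stokes run_qufit() above. The
# numbers and the model name "m1" are hypothetical placeholders;
# models.get_model() must recognise the chosen name, and PyMultiNest must be
# installed as required by the surrounding module.
def _example_run_qufit_arrays():
    """Hedged sketch: nested-sampling QU fit from in-memory Stokes arrays."""
    freq_Hz = np.linspace(400e6, 800e6, 256)         # hypothetical band
    IArr = np.ones_like(freq_Hz)
    QArr = 0.05 * IArr
    UArr = 0.02 * IArr
    VArr = np.zeros_like(freq_Hz)
    dI = dQ = dU = dV = np.full_like(freq_Hz, 0.01)  # assumed channel errors
    data = [freq_Hz, IArr, QArr, UArr, VArr, dI, dQ, dU, dV]
    return run_qufit(data, modelName="m1", polyOrd=2, diagnostic_plots=False)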
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType="variance", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print): """ Read the I, Q & U data and run RM-synthesis. """ # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2 * nBits) # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy try: if verbose: log("> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]", end=' ') (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy, dIArr_Jy, dQArr_Jy, dUArr_Jy) = data if verbose: log("... success.") except Exception: if verbose: log("...failed.") # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy try: if verbose: log("> Trying [freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy]", end=' ') (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = data if verbose: log("... success.") noStokesI = True except Exception: if verbose: log("...failed.") if debug: log(traceback.format_exc()) sys.exit() if verbose: log("Successfully read in the Stokes spectra.") # If no Stokes I present, create a dummy spectrum = unity if noStokesI: log("Warn: no Stokes I data in use.") IArr_Jy = np.ones_like(QArr_Jy) dIArr_Jy = np.zeros_like(QArr_Jy) # Convert to GHz and mJy for convenience freqArr_GHz = freqArr_Hz / 1e9 IArr_mJy = IArr_Jy * 1e3 QArr_mJy = QArr_Jy * 1e3 UArr_mJy = UArr_Jy * 1e3 dIArr_mJy = dIArr_Jy * 1e3 dQArr_mJy = dQArr_Jy * 1e3 dUArr_mJy = dUArr_Jy * 1e3 dQUArr_mJy = (dQArr_mJy + dUArr_mJy) / 2.0 dQUArr_Jy = dQUArr_mJy / 1e3 # Fit the Stokes I spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr_mJy, QArr = QArr_mJy, UArr = UArr_mJy, dIArr = dIArr_mJy, dQArr = dQArr_mJy, dUArr = dUArr_mJy, polyOrd = polyOrd, verbose = True, debug = debug) # Plot the data and the Stokes I model fit if showPlots: if verbose: log("Plotting the input data and spectral index fit.") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz / 1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz, IArr_mJy=IArr_mJy, qArr=qArr, uArr=uArr, dIArr_mJy=dIArr_mJy, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=freqHirArr_Hz, IModArr_mJy=IModHirArr_mJy, fig=specFig) # Use the custom navigation toolbar (does not work on Mac OS X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass # Display the figure # if not plt.isinteractive(): # specFig.show() # DEBUG (plot the Q, U and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz / 1e9, dQUArr_mJy, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz / 1e9, dQArr_mJy, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz / 1e9, dUArr_mJy, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9 ax.set_xlim( np.min(freqArr_Hz) / 1e9 - xRange * 0.05, np.max(freqArr_Hz) / 1e9 + xRange * 0.05) ax.set_xlabel('$\\nu$ (GHz)') ax.set_ylabel('RMS (mJy bm$^{-1}$)') ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - 
    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) -
                        np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)    # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." %
                    (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2),
                     nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr_mJy, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr,
                                            dataU=uArr,
                                            lambdaSqArr_m2=lambdaSqArr_m2,
                                            phiArr_radm2=phiArr_radm2,
                                            weightArr=weightArr,
                                            nBits=nBits,
                                            verbose=True,
                                            log=log)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2=lambdaSqArr_m2,
                        phiArr_radm2=phiArr_radm2,
                        weightArr=weightArr,
                        mskArr=~np.isfinite(qArr),
                        lam0Sq_m2=lam0Sq_m2,
                        double=True,
                        fitRMSF=fitRMSF,
                        fitRMSFreal=False,
                        nBits=nBits,
                        verbose=True,
                        log=log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#
    # dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #     do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)
    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz / 1e9)
    dirtyFDF *= (Ifreq0_mJybm / 1e3)    # FDF is in Jy

    # Calculate the theoretical noise in the FDF.
    # The old formula (commented out) is only valid for variance weights:
    # dFDFth_Jybm = np.sqrt(1. / np.sum(1. / dQUArr_Jy**2.))
    dFDFth_Jybm = np.sqrt(np.sum(weightArr**2 * dQUArr_Jy**2) /
                          (np.sum(weightArr))**2)

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth_Jybm,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy))
    mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm)
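
    # NOTE (added comment): if the fitted peak sits at the edge of the phi
    # axis, the block below re-runs the whole synthesis with phiMax doubled
    # and swaps the new output arrays in before continuing.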
    if mDict['phiPeakPIfit_rm2'] is None:
        log('Peak is at edge of RM spectrum! Peak fitting failed!\n')
        log('Rerunning with Phi_max twice as large.')
        # The following code re-runs everything with a higher phiMax, then
        # overwrites the appropriate variables so as to continue on without
        # interruption.
        mDict, aDict = run_rmsynth(data=data,
                                   polyOrd=polyOrd,
                                   phiMax_radm2=phiMax_radm2 * 2,
                                   dPhi_radm2=dPhi_radm2,
                                   nSamples=nSamples,
                                   weightType=weightType,
                                   fitRMSF=fitRMSF,
                                   noStokesI=noStokesI,
                                   nBits=nBits,
                                   showPlots=False,
                                   debug=debug,
                                   verbose=verbose)
        phiArr_radm2 = aDict["phiArr_radm2"]
        phi2Arr_radm2 = aDict["phi2Arr_radm2"]
        RMSFArr = aDict["RMSFArr"]
        freqArr_Hz = aDict["freqArr_Hz"]
        weightArr = aDict["weightArr"]
        dirtyFDF = aDict["dirtyFDF"]

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"] / (Ifreq0_mJybm / 1e3)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    # Add the derived arrays to a dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
        # Print the results to the screen
        log()
        log('-' * 80)
        log('RESULTS:\n')
        log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))
        log('Pol Angle = %.4g (+/-%.4g) deg' %
            (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]))
        log('Pol Angle 0 = %.4g (+/-%.4g) deg' %
            (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]))
        log('Peak FD = %.4g (+/-%.4g) rad/m^2' %
            (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]))
        log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9))
        log('I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"]))
        log('Peak PI = %.4g (+/-%.4g) mJy/beam' %
            (mDict["ampPeakPIfit_Jybm"] * 1e3,
             mDict["dAmpPeakPIfit_Jybm"] * 1e3))
        log('QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"] * 1e3))
        log('FDF Noise (theory) = %.4g mJy/beam' % (mDict["dFDFth_Jybm"] * 1e3))
        log('FDF Noise (Corrected MAD) = %.4g mJy/beam' %
            (mDict["dFDFcorMAD_Jybm"] * 1e3))
        log('FDF Noise (rms) = %.4g mJy/beam' % (mDict["dFDFrms_Jybm"] * 1e3))
        log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
        log('sigma_add(q) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"],
             mDict["dSigmaAddMinusQ"]))
        log('sigma_add(u) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"],
             mDict["dSigmaAddMinusU"]))
        log()
        log('-' * 80)

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig)

        # Use the custom navigation toolbar
        # try:
        #     fdfFig.canvas.toolbar.pack_forget()
        #     CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        # except Exception:
        #     pass

        # Display the figure
        # fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        plt.show()
        # if verbose: print "Press <RETURN> to exit ...",
        # input()

    return mDict, aDict
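

#-------------------------------------------------------------------------#
# Added usage sketch (not part of the original module): build a synthetic
# 7-element data list in the [freq_Hz, I, Q, U, dI, dQ, dU] layout parsed
# above and pass it to run_rmsynth.  The function name, array values and
# the injected RM are assumptions for illustration only; it relies on the
# module's own helper routines being importable.
def _example_run_rmsynth():
    import numpy as np

    freq_Hz = np.linspace(0.8e9, 1.8e9, 128)
    RM_true = 50.0                              # injected RM in rad/m^2
    lamSq_m2 = (2.998e8 / freq_Hz)**2
    IArr = np.ones_like(freq_Hz)
    QArr = 0.1 * np.cos(2.0 * RM_true * lamSq_m2)
    UArr = 0.1 * np.sin(2.0 * RM_true * lamSq_m2)
    dArr = np.full_like(freq_Hz, 0.01)          # per-channel noise estimate

    mDict, aDict = run_rmsynth([freq_Hz, IArr, QArr, UArr, dArr, dArr, dArr],
                               polyOrd=2, verbose=False)
    # The fitted peak should sit near RM_true for this Faraday-thin source.
    return mDict["phiPeakPIfit_rm2"]
#-------------------------------------------------------------------------#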