Example #1
0
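# NOTE: this snippet is an excerpt from a larger module. It assumes module-level
# imports (os, sys, time, json, shutil, traceback, imp, numpy as np,
# matplotlib.pyplot as plt, corner, pymultinest as pmn and, when mpiSwitch is
# True, MPI from mpi4py), a speed-of-light constant C in m/s, and helper
# routines (create_frac_spectra, poly5, plot_Ipqu_spectra_fig, CustomNavbar,
# prior_call, lnlike_call, chisq_model, wrap_chains, merge_two_dicts,
# init_mnest, toscalar) defined elsewhere in the package.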
def run_qufit(dataFile,
              modelNum,
              outDir="",
              polyOrd=3,
              nBits=32,
              noStokesI=False,
              showPlots=False,
              debug=False,
              verbose=False):
    """Function controlling the fitting procedure."""

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)
    nestOut = prefixOut + "_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut):
            shutil.rmtree(nestOut, True)
        os.mkdir(nestOut)
    if mpiSwitch:
        mpiComm.Barrier()

    # Read the data file in the root process
    if mpiRank == 0:
        dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr
            if mpiRank == 0:
                print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            if mpiSwitch:
                MPI.Finalize()
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if mpiRank == 0:
            print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        dataArr = create_frac_spectra(freqArr=freqArr_GHz,
                                      IArr=IArr,
                                      QArr=QArr,
                                      UArr=UArr,
                                      dIArr=dIArr,
                                      dQArr=dQArr,
                                      dUArr=dUArr,
                                      polyOrd=polyOrd,
                                      verbose=True)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    if mpiRank == 0:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(10, 6))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        if showPlots:
            specFig.canvas.draw()
            specFig.show()

    #-------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:
        print("\nLoading the model from 'models_ns/m%d.py' ..." % modelNum)
    mod = imp.load_source("m%d" % modelNum, "models_ns/m%d.py" % modelNum)
    global model
    model = mod.model

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Unpack the inParms structure
    parNames = [x["parname"] for x in mod.inParms]
    labels = [x["label"] for x in mod.inParms]
    values = [x["value"] for x in mod.inParms]
    bounds = [x["bounds"] for x in mod.inParms]
    priorTypes = [x["priortype"] for x in mod.inParms]
    wraps = [x["wrap"] for x in mod.inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr)

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    pmn.run(**nestArgsDict)

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyser object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyser methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]['median']
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
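        # g maps the [lower, median, upper] percentiles to
        # (median, +err, -err); the 15.72 and 84.27 percentiles bracket
        # roughly the central 68 per cent (~ +/-1 sigma) of each marginal
        # posterior distribution.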
        for i in range(nDim):
            p[i], errPlus[i], errMinus[i] = \
                        g(np.percentile(chains[:, i], [15.72, 50, 84.27]))

        # Calculate goodness-of-fit parameters
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike
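        # For reference: AIC = 2k - 2lnL and BIC = k*ln(N) - 2lnL, with k free
        # parameters and N data points (here N counts the q and u channels
        # separately). The textbook small-sample form is
        # AICc = AIC + 2k(k+1)/(N-k-1); the expression above keeps only the
        # correction term and -2lnL.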

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print("%s = %.4g (+%3g, -%3g)" % \
                  (parNames[i], p[i], errPlus[i], errMinus[i]))
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        outFile = prefixOut + "_m%d_nest.json" % modelNum
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {
            "parNames": toscalar(parNames),
            "labels": toscalar(labels),
            "values": toscalar(p),
            "errPlus": toscalar(errPlus),
            "errMinus": toscalar(errMinus),
            "bounds": toscalar(bounds),
            "priorTypes": toscalar(priorTypes),
            "wraps": toscalar(wraps),
            "dof": toscalar(dof),
            "chiSq": toscalar(chiSq),
            "chiSqRed": toscalar(chiSqRed),
            "AIC": toscalar(AIC),
            "AICc": toscalar(AICc),
            "BIC": toscalar(BIC),
            "IfitDict": IfitDict
        }
        json.dump(saveDict, open(outFile, "w"))
        print("Results saved in JSON format to:\n '%s'\n" % outFile)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        pDict = {k: v for k, v in zip(parNames, p)}
        quModArr = model(pDict, lamSqHirArr_m2)
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModArr,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              fig=specFig)
        specFig.canvas.draw()

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del (labels[i])
            del (p[i])
        cornerFig = corner.corner(xs=chains,
                                  labels=labels,
                                  range=[0.99999] * nFree,
                                  truths=p,
                                  quantiles=[0.1572, 0.8427],
                                  bins=30)

        # Save the figures
        outFile = nestOut + "fig_m%d_specfit.pdf" % modelNum
        specFig.savefig(outFile)
        print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
        outFile = nestOut + "fig_m%d_corner.pdf" % modelNum
        cornerFig.savefig(outFile)
        print("Plot of posterior samples saved to \n '%s'\n" % outFile)

        # Display the figures
        if showPlots:
            specFig.show()
            cornerFig.show()
            print("> Press <RETURN> to exit ...", end="")
            sys.stdout.flush()
            input()

        # Clean up
        plt.close(specFig)
        plt.close(cornerFig)

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
Example #2
0
def run_rmsynth(data,
                polyOrd=3,
                phiMax_radm2=None,
                dPhi_radm2=None,
                nSamples=10.0,
                weightType="variance",
                fitRMSF=False,
                noStokesI=False,
                phiNoise_radm2=1e6,
                nBits=32,
                showPlots=False,
                debug=False,
                verbose=False,
                log=print,
                units='Jy/beam',
                e_num=1):
    """Run RM synthesis on 1D data.

    Args:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u,  dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each channel.
                dq (array_like): Error in fractional Stokes Q intensity in each channel.
                du (array_like): Error in fractional Stokes U intensity in each channel.

    Kwargs:
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
        phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
        dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
        nSamples (float): Number of samples across the RMSF.
        weightType (str): Can be "variance" or "uniform"
            "variance" -- Weight by uncertainty in Q and U.
            "uniform" -- Weight uniformly (i.e. with 1s)
        fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Set True if no Stokes I data is provided.
        phiNoise_radm2 (float): ????
        nBits (int): Precision of floating point numbers.
        showPlots (bool): Show plots?
        debug (bool): Turn on debugging messages & plots?
        verbose (bool): Verbosity.
        log (function): Which logging function to use.
        units (str): Units of the data.
        e_num (int): Passed through to do_rmsynth_planes.

    Returns:
        mDict (dict): Summary of RM synthesis results.
        aDict (dict): Data output by RM synthesis.
        mylist: Additional diagnostic output returned by do_rmsynth_planes.

    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        if verbose: log("... success.")
    except Exception:
        if verbose: log("...failed.")
        # freq_Hz, q, u, dq, du
        try:
            if verbose: log("> Trying [freq_Hz, q, u,  dq, du]", end=' ')
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
            if verbose: log("... success.")
            noStokesI = True
        except Exception:
            if verbose: log("...failed.")
            if debug:
                log(traceback.format_exc())
            sys.exit()
    if verbose: log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if verbose: log("Warn: no Stokes I data in use.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    dQUArr = (dQArr + dUArr) / 2.0

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr,
                                 QArr     = QArr,
                                 UArr     = UArr,
                                 dIArr    = dIArr,
                                 dQArr    = dQArr,
                                 dUArr    = dUArr,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if showPlots:
        if verbose: log("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig,
                              units=units)

        # Use the custom navigation toolbar (does not work on Mac OS X)
        # try:
        #     specFig.canvas.toolbar.pack_forget()
        #     CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        # except Exception:
        #     pass

        # Display the figure
        # if not plt.isinteractive():
        #     specFig.show()

        # DEBUG (plot the Q, U and average RMS spectrum)
        if debug:
            rmsFig = plt.figure(figsize=(12.0, 8))
            ax = rmsFig.add_subplot(111)
            ax.plot(freqArr_Hz / 1e9,
                    dQUArr,
                    marker='o',
                    color='k',
                    lw=0.5,
                    label='rms <QU>')
            ax.plot(freqArr_Hz / 1e9,
                    dQArr,
                    marker='o',
                    color='b',
                    lw=0.5,
                    label='rms Q')
            ax.plot(freqArr_Hz / 1e9,
                    dUArr,
                    marker='o',
                    color='r',
                    lw=0.5,
                    label='rms U')
            xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
            ax.set_xlim(
                np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
                np.max(freqArr_Hz) / 1e9 + xRange * 0.05)
            ax.set_xlabel('$\\nu$ (GHz)')
            ax.set_ylabel('RMS ' + units)
            ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
            # rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax
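    # These defaults follow the usual RM-synthesis estimates (cf. Brentjens &
    # de Bruyn 2005): the RMSF FWHM is ~2*sqrt(3) divided by the total
    # lambda^2 coverage, and sensitivity is retained out to a Faraday depth of
    # ~sqrt(3) divided by the largest channel width in lambda^2.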

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose:
        log("PhiArr = %.2f to %.2f by %.2f (%d chans)." %
            (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2, mylist = do_rmsynth_planes(
        dataQ=qArr,
        dataU=uArr,
        lambdaSqArr_m2=lambdaSqArr_m2,
        phiArr_radm2=phiArr_radm2,
        weightArr=weightArr,
        nBits=nBits,
        verbose=verbose,
        log=log,
        e_num=e_num)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = ~np.isfinite(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = verbose,
                        log             = log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0 = poly5(fitDict["p"])(freq0_Hz / 1e9)
    # FDF is in fracpol units initially; convert back to flux
    dirtyFDF *= Ifreq0

    # Calculate the theoretical noise in the FDF
    # (the old formula only worked for variance weights)
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
    dFDFth = np.sqrt(
        np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) /
        (np.sum(weightArr))**2)
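    # Standard error propagation for a weighted mean:
    # sigma_FDF = sqrt(sum(w_i^2 sigma_i^2)) / sum(w_i), which reduces to
    # 1/sqrt(sum(1/sigma_i^2)) when w_i = 1/sigma_i^2.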

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0"] = toscalar(Ifreq0)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU"] = toscalar(nanmedian(dQUArr))
    mDict["dFDFth"] = toscalar(dFDFth)
    mDict["units"] = units
    mDict['dQUArr'] = dQUArr

    if fitDict["fitStatus"] >= 128:
        log("WARNING: Stokes I model contains negative values!")
    elif fitDict["fitStatus"] >= 64:
        log("Caution: Stokes I model has low signal-to-noise.")

    # Add information on the nature of the channels:
    good_channels = np.where(np.logical_and(weightArr != 0,
                                            np.isfinite(qArr)))[0]
    mDict["min_freq"] = float(np.min(freqArr_Hz[good_channels]))
    mDict["max_freq"] = float(np.max(freqArr_Hz[good_channels]))
    mDict["N_channels"] = good_channels.size
    mDict["median_channel_width"] = float(np.median(np.diff(freqArr_Hz)))

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit"] / (Ifreq0)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    # Add the array dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
        # Print the results to the screen
        log()
        log('-' * 80)
        log('RESULTS:\n')
        log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))

        log('Pol Angle = %.4g (+/-%.4g) deg' %
            (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]))
        log('Pol Angle 0 = %.4g (+/-%.4g) deg' %
            (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]))
        log('Peak FD = %.4g (+/-%.4g) rad/m^2' %
            (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]))
        log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9))
        log('I freq0 = %.4g %s' % (mDict["Ifreq0"], units))
        log('Peak PI = %.4g (+/-%.4g) %s' %
            (mDict["ampPeakPIfit"], mDict["dAmpPeakPIfit"], units))
        log('QU Noise = %.4g %s' % (mDict["dQU"], units))
        log('FDF Noise (theory)   = %.4g %s' % (mDict["dFDFth"], units))
        log('FDF Noise (Corrected MAD) = %.4g %s' %
            (mDict["dFDFcorMAD"], units))
        log('FDF Noise (rms)   = %.4g %s' % (mDict["dFDFrms"], units))
        log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
        log('sigma_add(q) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"],
             mDict["dSigmaAddMinusQ"]))
        log('sigma_add(u) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"],
             mDict["dSigmaAddMinusU"]))
        log()
        log('-' * 80)

    myfig = plotmylist(mylist)
    plt.show()
    myfig.show()

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig,
                          units=units)

        # Use the custom navigation toolbar
        # try:
        #     fdfFig.canvas.toolbar.pack_forget()
        #     CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        # except Exception:
        #     pass

        # Display the figure
        # fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        plt.show()
        # if verbose: print "Press <RETURN> to exit ...",
        # input()

    return mDict, aDict, mylist
Example #3
0
def run_rmsynth(dataFile,
                polyOrd=3,
                phiMax_radm2=None,
                dPhi_radm2=None,
                nSamples=10.0,
                weightType="variance",
                fitRMSF=False,
                noStokesI=False,
                phiNoise_radm2=1e6,
                nBits=32,
                showPlots=False,
                debug=False):
    """
    Read the I, Q & U data from the ASCII file and run RM-synthesis.
    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)

    # Read the data-file. Format=space-delimited, comments="#".
    print "Reading the data file '%s':" % dataFile
    # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy
    try:
        print "> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]",
        (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy,
         dIArr_Jy, dQArr_Jy, dUArr_Jy) = \
         np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
        print "... success."
    except Exception:
        print "...failed."
        # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy
        try:
            print "> Trying [freq_Hz, q_Jy, u_Jy,  dq_Jy, du_Jy]",
            (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = \
                         np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
            print "... success."
            noStokesI = True
        except Exception:
            print "...failed."
            if debug:
                print traceback.format_exc()
            sys.exit()
    print "Successfully read in the Stokes spectra."

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        print "Warn: no Stokes I data in use."
        IArr_Jy = np.ones_like(QArr_Jy)
        dIArr_Jy = np.zeros_like(QArr_Jy)

    # Convert to GHz and mJy for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    IArr_mJy = IArr_Jy * 1e3
    QArr_mJy = QArr_Jy * 1e3
    UArr_mJy = UArr_Jy * 1e3
    dIArr_mJy = dIArr_Jy * 1e3
    dQArr_mJy = dQArr_Jy * 1e3
    dUArr_mJy = dUArr_Jy * 1e3
    dQUArr_mJy = (dQArr_mJy + dUArr_mJy) / 2.0
    dQUArr_Jy = dQUArr_mJy / 1e3

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr_mJy,
                                 QArr     = QArr_mJy,
                                 UArr     = UArr_mJy,
                                 dIArr    = dIArr_mJy,
                                 dQArr    = dQArr_mJy,
                                 dUArr    = dUArr_mJy,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if showPlots:
        print "Plotting the input data and spectral index fit."
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr_mJy=IArr_mJy,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr_mJy=dIArr_mJy,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr_mJy=IModHirArr_mJy,
                              fig=specFig)

        # Use the custom navigation toolbar (does not work on Mac OS X)
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        specFig.show()

        # DEBUG (plot the Q, U and average RMS spectrum)
        if debug:
            rmsFig = plt.figure(figsize=(12.0, 8))
            ax = rmsFig.add_subplot(111)
            ax.plot(freqArr_Hz / 1e9,
                    dQUArr_mJy,
                    marker='o',
                    color='k',
                    lw=0.5,
                    label='rms <QU>')
            ax.plot(freqArr_Hz / 1e9,
                    dQArr_mJy,
                    marker='o',
                    color='b',
                    lw=0.5,
                    label='rms Q')
            ax.plot(freqArr_Hz / 1e9,
                    dUArr_mJy,
                    marker='o',
                    color='r',
                    lw=0.5,
                    label='rms U')
            xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
            ax.set_xlim(
                np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
                np.max(freqArr_Hz) / 1e9 + xRange * 0.05)
            ax.set_xlabel('$\\nu$ (GHz)')
            ax.set_ylabel('RMS (mJy bm$^{-1}$)')
            ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
            rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    print "PhiArr = %.2f to %.2f by %.2f (%d chans)." % (
        phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)

    # Calculate the weighting as 1/sigma^2 or all 1s (natural)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr_mJy, 2.0)
    else:
        weightType = "natural"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    print "Weight type is '%s'." % weightType

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr,
                                            dataU=uArr,
                                            lambdaSqArr_m2=lambdaSqArr_m2,
                                            phiArr_radm2=phiArr_radm2,
                                            weightArr=weightArr,
                                            nBits=nBits,
                                            verbose=True)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = np.isnan(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = True)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    print "> RM-synthesis completed in %.2f seconds." % cputime

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz / 1e9)
    dirtyFDF *= (Ifreq0_mJybm / 1e3)  # FDF is in Jy

    # Calculate the theoretical noise in the FDF
    dFDFth_Jybm = np.sqrt(1. / np.sum(1. / dQUArr_Jy**2.))
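    # Theoretical FDF noise for inverse-variance weighting: the uncertainty of
    # the weighted mean, 1/sqrt(sum(1/sigma_i^2)).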

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth_Jybm,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy))
    mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm)

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"] / (Ifreq0_mJybm / 1e3)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    # Save the  dirty FDF, RMSF and weight array to ASCII files
    print "Saving the dirty FDF, RMSF weight arrays to ASCII files."
    outFile = prefixOut + "_FDFdirty.dat"
    print "> %s" % outFile
    np.savetxt(outFile, zip(phiArr_radm2, dirtyFDF.real, dirtyFDF.imag))
    outFile = prefixOut + "_RMSF.dat"
    print "> %s" % outFile
    np.savetxt(outFile, zip(phi2Arr_radm2, RMSFArr.real, RMSFArr.imag))
    outFile = prefixOut + "_weight.dat"
    print "> %s" % outFile
    np.savetxt(outFile, zip(freqArr_Hz, weightArr))

    # Save the measurements to a "key=value" text file
    print "Saving the measurements on the FDF in 'key=val' and JSON formats."
    outFile = prefixOut + "_RMsynth.dat"
    print "> %s" % outFile
    FH = open(outFile, "w")
    for k, v in mDict.iteritems():
        FH.write("%s=%s\n" % (k, v))
    FH.close()
    outFile = prefixOut + "_RMsynth.json"
    print "> %s" % outFile
    json.dump(dict(mDict), open(outFile, "w"))

    # Print the results to the screen
    print
    print '-' * 80
    print 'RESULTS:\n'
    print 'FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"])

    print 'Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
                                              mDict["dPolAngleFit_deg"])
    print 'Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
                                                mDict["dPolAngle0Fit_deg"])
    print 'Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
                                                mDict["dPhiPeakPIfit_rm2"])
    print 'freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9)
    print 'I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"])
    print 'Peak PI = %.4g (+/-%.4g) mJy/beam' % (
        mDict["ampPeakPIfit_Jybm"] * 1e3, mDict["dAmpPeakPIfit_Jybm"] * 1e3)
    print 'QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"] * 1e3)
    print 'FDF Noise (theory)   = %.4g mJy/beam' % (mDict["dFDFth_Jybm"] * 1e3)
    print 'FDF SNR = %.4g ' % (mDict["snrPIfit"])
    print 'sigma_add(q) = %.4g (+%.4g, -%.4g)' % (
        mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"], mDict["dSigmaAddMinusQ"])
    print 'sigma_add(u) = %.4g (+%.4g, -%.4g)' % (
        mDict["sigmaAddU"], mDict["dSigmaAddPlusU"], mDict["dSigmaAddMinusU"])
    print
    print '-' * 80

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig)

        # Use the custom navigation toolbar
        try:
            fdfFig.canvas.toolbar.pack_forget()
            CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        print "Press <RETURN> to exit ...",
        raw_input()
Example #4
0
def run_qufit(dataFile,
              modelNum,
              outDir="",
              polyOrd=2,
              nBits=32,
              noStokesI=False,
              showPlots=False,
              sigma_clip=5,
              debug=False,
              verbose=False,
              restart=True,
              fit_function='log'):
    """Carry out QU-fitting using the supplied parameters:
        dataFile (str, required): relative or absolute path of file containing 
            frequencies and Stokes parameters with errors.
        modelNum (int, required): number of model to be fit to data. Models and
             priors are specified as Python code in files called 'mX.py' within  
            the 'models_ns' directory.
        outDir (str): relative or absolute path to save outputs to. Defaults to
            working directory.
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum (used to
            normalize Q and U values). Defaults to 3 (cubic).
        nBits (int): number of bits to use in internal calculations.
        noStokesI (bool): set True if the Stokes I spectrum should be ignored.
        showPlots (bool): Set true if the spectrum and parameter space plots
            should be displayed.
        sigma_clip (float): How many standard deviations to clip around the 
            mean of each mode in the parameter postierors.
        debug (bool): Display debug messages.
        verbose (bool): Print verbose messages/results to terminal.
        
        Returns: nothing. Results saved to files and/or printed to terminal."""
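    # Minimal usage sketch (illustrative only; the file name is an assumption).
    # With mpiSwitch enabled, the enclosing script would typically be launched
    # under mpiexec so that the nested sampling runs in parallel:
    #
    #     run_qufit("StokesIQU.dat", modelNum=1, polyOrd=2,
    #               fit_function='log', showPlots=False)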

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)
    nestOut = f"{prefixOut}_m{modelNum}_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut) and restart:
            shutil.rmtree(nestOut, True)
            os.mkdir(nestOut)
        elif not os.path.exists(nestOut) and restart:
            os.mkdir(nestOut)
        elif not os.path.exists(nestOut) and not restart:
            print("Restart requested, but previous run not found!")
            raise Exception(f"{nestOut} does not exist.")
    if mpiSwitch:
        mpiComm.Barrier()

    # Read the data file in the root process
    if mpiRank == 0:
        dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr
            if mpiRank == 0:
                print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            if mpiSwitch:
                MPI.Finalize()
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if mpiRank == 0:
            print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Compute the lambda-squared values for each channel
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        dataArr = create_frac_spectra(freqArr=freqArr_Hz,
                                      IArr=IArr,
                                      QArr=QArr,
                                      UArr=UArr,
                                      dIArr=dIArr,
                                      dQArr=dQArr,
                                      dUArr=dUArr,
                                      polyOrd=polyOrd,
                                      verbose=True,
                                      fit_function=fit_function)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    if mpiRank == 0:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
        specFig = plt.figure(facecolor='w', figsize=(10, 6))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        if showPlots:
            specFig.canvas.draw()
            specFig.show()

    #-------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:
        print("\nLoading the model from 'models_ns/m%d.py' ..." % modelNum)
    # First check the working directory for a model. Failing that, try the
    # install directory.
    try:
        spec = importlib.util.spec_from_file_location(
            "m%d" % modelNum, "models_ns/m%d.py" % modelNum)
        mod = importlib.util.module_from_spec(spec)
        sys.modules[spec.name] = mod
        spec.loader.exec_module(mod)
    except FileNotFoundError:
        try:
            RMtools_dir = os.path.dirname(
                importlib.util.find_spec('RMtools_1D').origin)
            spec = importlib.util.spec_from_file_location(
                "m%d" % modelNum, RMtools_dir + "/models_ns/m%d.py" % modelNum)
            mod = importlib.util.module_from_spec(spec)
            sys.modules[spec.name] = mod
            spec.loader.exec_module(mod)
        except Exception:
            print('Model could not be found! Please make sure the model is '
                  'present either in {}/models_ns/ or in the RMtools_1D '
                  'package under models_ns/.'.format(os.getcwd()))
            sys.exit()

    global model
    model = mod.model

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Unpack the inParms structure
    parNames = [x["parname"] for x in mod.inParms]
    labels = [x["label"] for x in mod.inParms]
    values = [x["value"] for x in mod.inParms]
    bounds = [x["bounds"] for x in mod.inParms]
    priorTypes = [x["priortype"] for x in mod.inParms]
    wraps = [x["wrap"] for x in mod.inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr)

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    # Look for multiple modes
    nestArgsDict['multimodal'] = True
    nestArgsDict['n_clustering_params'] = nDim
    nestArgsDict['verbose'] = False
    pmn.run(**nestArgsDict)
    # Save parnames for use with PyMultinest tools
    json.dump(parNames, open(f'{nestOut}/params.json', 'w'))

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyser object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyser methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]['median']
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        # Find the mode with the highest evidence
        mode_evidence = [
            mode['strictly local log-evidence'] for mode in statDict['modes']
        ]
        # Get the max and std for modal value
        modes = np.array(
            statDict['modes'][np.argmax(mode_evidence)]['maximum a posterior'])
        sigmas = np.array(statDict['modes'][np.argmax(mode_evidence)]['sigma'])
        upper = modes + sigma_clip * sigmas
        lower = modes - sigma_clip * sigmas
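        # upper/lower define a +/- sigma_clip window around the MAP value of
        # the highest-evidence mode; the percentile statistics below are then
        # computed only from posterior samples inside that window, which keeps
        # secondary modes from inflating the quoted uncertainties.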

        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
        for i in range(nDim):
            # Get stats around modal value
            idx = (chains[:, i] > lower[i]) & (chains[:, i] < upper[i])
            p[i], errPlus[i], errMinus[i] = \
                        g(np.percentile(chains[idx, i], [15.72, 50, 84.27]))

        # Calculate goodness-of-fit parameters
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print("%s = %.4g (+%3g, -%3g)" % \
                  (parNames[i], p[i], errPlus[i], errMinus[i]))
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        outFile = prefixOut + "_m%d_nest.json" % modelNum
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {
            "parNames": toscalar(parNames),
            "labels": toscalar(labels),
            "values": toscalar(p),
            "errPlus": toscalar(errPlus),
            "errMinus": toscalar(errMinus),
            "bounds": toscalar(bounds),
            "priorTypes": toscalar(priorTypes),
            "wraps": toscalar(wraps),
            "dof": toscalar(dof),
            "chiSq": toscalar(chiSq),
            "chiSqRed": toscalar(chiSqRed),
            "AIC": toscalar(AIC),
            "AICc": toscalar(AICc),
            "BIC": toscalar(BIC),
            "ln(EVIDENCE) ": toscalar(lnEvidence),
            "dLn(EVIDENCE)": toscalar(dLnEvidence),
            "nFree": toscalar(nFree),
            "Imodel": toscalar(IfitDict["p"]),
            "IfitChiSq": toscalar(IfitDict["chiSq"]),
            "IfitChiSqRed": toscalar(IfitDict["chiSqRed"]),
            "IfitPolyOrd": toscalar(IfitDict["polyOrd"]),
            "Ifitfreq0": toscalar(IfitDict["reference_frequency_Hz"])
        }
        json.dump(saveDict, open(outFile, "w"))
        outFile = prefixOut + "_m%d_nest.dat" % modelNum
        FH = open(outFile, "w")
        for k, v in saveDict.items():
            FH.write("%s=%s\n" % (k, v))
        FH.close()
        print("Results saved in JSON and .dat format to:\n '%s'\n" % outFile)

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del (labels[i])
            del (p[i])

        cornerFig = corner.corner(xs=chains,
                                  labels=labels,
                                  range=[(l, u) for l, u in zip(lower, upper)],
                                  truths=p,
                                  quantiles=[0.1572, 0.8427],
                                  bins=30)

        # Save the posterior chains to ASCII file
        if verbose: print("Saving the posterior chains to ASCII file.")
        outFile = prefixOut + "_m%d_posteriorChains.dat" % modelNum
        if verbose: print("> %s" % outFile)
        np.savetxt(outFile, chains)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
        pDict = {k: v for k, v in zip(parNames, p)}
        quModArr = model(pDict, lamSqHirArr_m2)
        model_dict = {
            'chains': chains,
            'model': model,
            'parNames': parNames,
            'values': values
        }
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModArr,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              model_dict=model_dict,
                              fig=specFig)
        specFig.canvas.draw()

        # Save the figures
        outFile = prefixOut + "fig_m%d_specfit.pdf" % modelNum
        specFig.savefig(outFile)
        print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
        outFile = prefixOut + "fig_m%d_corner.pdf" % modelNum
        cornerFig.savefig(outFile)
        print("Plot of posterior samples saved to \n '%s'\n" % outFile)

        # Display the figures
        if showPlots:
            plt.show()
            #cornerFig.show()

        # Clean up

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
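
For orientation, a minimal driver for this MPI/PyMultiNest variant might look like the sketch below. The import path, the data file name and the model number are illustrative assumptions, and the call assumes the dataFile/modelNum signature used by this variant; the data file is expected to be an ASCII table with columns [freq_Hz, I, Q, U, dI, dQ, dU] (or the five-column fractional form).

# Hypothetical driver script (import path and file name are assumptions)
from do_QUfit_1D_mnest import run_qufit

run_qufit(dataFile="source1.dat",   # ASCII table: freq_Hz, I, Q, U, dI, dQ, dU
          modelNum=1,               # which QU model to fit
          showPlots=False,
          verbose=True)

# When MPI support is available, the same script is typically launched as:
#   mpirun -n 4 python fit_source1.py
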
Example #5
def run_qufit(
    data,
    modelName,
    IMod=None,
    polyOrd=3,
    nBits=32,
    verbose=False,
    diagnostic_plots=True,
    values=None,
    bounds=None,
):

    """Function for Nested sampling fitting of Stokes parameters"""

    if mpiSwitch:
        # Get the processing environment
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    if isinstance(diagnostic_plots, str):
        outDir = diagnostic_plots
    else:
        # outDir=os.path.expanduser("~")
        outDir = "/tmp"
    nestOut = outDir + "/QUfit_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut):
            shutil.rmtree(nestOut, True)
        os.mkdir(nestOut)
    if mpiSwitch:
        mpiComm.Barrier()

    # Copy the input data on the root process
    if mpiRank == 0:
        dataArr = data.copy()
    else:
        dataArr = None

    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, V, dI, dQ, dU, dV
    try:
        (freqArr_Hz, IArr, QArr, UArr, VArr, dIArr, dQArr, dUArr, dVArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, V, dI, dQ, dU, dV]")
    except Exception:
        print("pass data in format: [freq_Hz, I, Q, U, V, dI, dQ, dU, dV]")
        return

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        if IMod is None:
            dataArr = create_frac_spectra_test(
                freqArr=freqArr_GHz,
                IArr=IArr,
                QArr=QArr,
                UArr=UArr,
                dIArr=dIArr,
                dQArr=dQArr,
                dUArr=dUArr,
                VArr=VArr,
                dVArr=dVArr,
                polyOrd=polyOrd,
                IModArr=None,
                verbose=True,
            )
        else:
            dataArr = create_frac_spectra_test(
                freqArr=freqArr_GHz,
                IArr=IArr,
                QArr=QArr,
                UArr=UArr,
                dIArr=dIArr,
                dQArr=dQArr,
                dUArr=dUArr,
                VArr=VArr,
                dVArr=dVArr,
                polyOrd=polyOrd,
                IModArr=IMod(freqArr_Hz),
                verbose=True,
            )

    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, vArr, dqArr, duArr, dvArr, IfitDict) = dataArr

    # -------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    global model
    model = models.get_model(modelName)
    inParms = models.get_params(modelName)

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Unpack the inParms structure
    parNames = [x["parname"] for x in inParms]
    labels = [x["label"] for x in inParms]
    if values is None:
        values = [x["value"] for x in inParms]
    if bounds is None:
        bounds = [x["bounds"] for x in inParms]
    priorTypes = [x["priortype"] for x in inParms]
    wraps = [x["wrap"] for x in inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(
        parNames, lamSqArr_m2, QArr, dQArr, UArr, dUArr, VArr, dVArr, IModArr
    )

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), models.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    pmn.run(**nestArgsDict)

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyser object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyser methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]["median"]
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
        for i in range(nDim):
            p[i], errPlus[i], errMinus[i] = g(
                np.percentile(chains[:, i], [15.72, 50, 84.27])
            )

        # Calculate goodness-of-fit parameters
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(
            parNames, p, lamSqArr_m2, QArr, dQArr, UArr, dUArr, VArr, dVArr, IModArr
        )
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print(
                "%s = %.4g (+%3g, -%3g)" % (parNames[i], p[i], errPlus[i], errMinus[i])
            )
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        #         outFile = nestOut + "m%d_nest.json" % modelNum
        outFile = nestOut + "%s_nest.json" % modelName
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {
            "parNames": toscalar(parNames),
            "labels": toscalar(labels),
            "values": toscalar(p),
            "errPlus": toscalar(errPlus),
            "errMinus": toscalar(errMinus),
            "bounds": toscalar(bounds),
            "priorTypes": toscalar(priorTypes),
            "wraps": toscalar(wraps),
            "dof": toscalar(dof),
            "chiSq": toscalar(chiSq),
            "chiSqRed": toscalar(chiSqRed),
            "AIC": toscalar(AIC),
            "AICc": toscalar(AICc),
            "BIC": toscalar(BIC),
            "IfitDict": IfitDict,
        }
        json.dump(saveDict, open(outFile, "w"))
        print("Results saved in JSON format to:\n '%s'\n" % outFile)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        pDict = {k: v for k, v in zip(parNames, p)}
        if IMod:
            IModHirArr = IMod(freqHirArr_Hz)
        else:
            IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        quModArr, vModArr = model(pDict, lamSqHirArr_m2, IModHirArr)
        specFig = plt.figure(figsize=(10, 6))
        plot_pqu_spectra_chime(
            freqArr_Hz=freqArr_Hz,
            IArr=IArr,
            qArr=qArr,
            uArr=uArr,
            dIArr=dIArr,
            dqArr=dqArr,
            duArr=duArr,
            freqHirArr_Hz=freqHirArr_Hz,
            IModArr=IModHirArr,
            qModArr=quModArr.real,
            uModArr=quModArr.imag,
            fig=specFig,
        )
        specFig.canvas.draw()

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del labels[i]
            del p[i]
        cornerFig = corner.corner(
            xs=chains,
            labels=labels,
            range=[0.99999] * nFree,
            truths=p,
            quantiles=[0.1572, 0.8427],
            bins=30,
        )

        # Plot the stokes Q vs. U (NEEDS WORK)
        qvsuFig = plot_q_vs_u_ax_chime(
            freqArr_Hz=freqArr_Hz,
            qArr=qArr,
            uArr=uArr,
            dqArr=dqArr,
            duArr=duArr,
            freqHirArr_Hz=freqHirArr_Hz,
            qModArr=quModArr.real / IModHirArr,
            uModArr=quModArr.imag / IModHirArr,
        )

        if diagnostic_plots:
            if isinstance(diagnostic_plots, bool):
                qvsuFig.show()
                sys.stdout.flush()
            else:
                outFile = diagnostic_plots + "/fig_%s_specfit.pdf" % modelName
                specFig.savefig(outFile)
                print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
                outFile = diagnostic_plots + "/fig_%s_corner.pdf" % modelName
                cornerFig.savefig(outFile)
                print("Plot of posterior samples saved to \n '%s'\n" % outFile)
                outFile = diagnostic_plots + "/fig_%s_q_vs_u.pdf" % modelName
                qvsuFig.savefig(outFile)

        pol_prod = zip(p, errPlus, errMinus)

        return (
            list(pol_prod),
            freqHirArr_Hz,
            qArr,
            uArr,
            vArr,
            dqArr,
            duArr,
            dvArr,
            IModArr,
            quModArr.real,
            quModArr.imag,
            vModArr,
        )
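
A hedged usage sketch for this in-memory variant follows; the toy arrays, the model name "m1" and the use of /tmp for diagnostic output are assumptions introduced only for illustration, not values taken from the listing.

# Toy input: a weakly polarised source with RM ~ 50 rad/m^2 and flat Stokes I
import numpy as np

freq_Hz = np.linspace(400e6, 800e6, 128)
lam2 = (2.998e8 / freq_Hz) ** 2
I = np.ones_like(freq_Hz)
Q = 0.05 * np.cos(2.0 * 50.0 * lam2)
U = 0.05 * np.sin(2.0 * 50.0 * lam2)
V = np.zeros_like(freq_Hz)
dI = dQ = dU = dV = np.full_like(freq_Hz, 0.005)

# Rows must be ordered [freq_Hz, I, Q, U, V, dI, dQ, dU, dV]
data = np.array([freq_Hz, I, Q, U, V, dI, dQ, dU, dV])
results = run_qufit(data, modelName="m1", diagnostic_plots="/tmp")
pol_prod = results[0]   # list of (value, errPlus, errMinus) per free parameter
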
Example #6
def run_rmsynth(data,
                polyOrd=3,
                phiMax_radm2=None,
                dPhi_radm2=None,
                nSamples=10.0,
                weightType="variance",
                fitRMSF=False,
                noStokesI=False,
                phiNoise_radm2=1e6,
                nBits=32,
                showPlots=False,
                debug=False,
                verbose=False,
                log=print):
    """
    Read the I, Q & U data and run RM-synthesis.
    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy
    try:
        if verbose:
            log("> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]",
                end=' ')
        (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy, dIArr_Jy, dQArr_Jy,
         dUArr_Jy) = data
        if verbose: log("... success.")
    except Exception:
        if verbose: log("...failed.")
        # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy
        try:
            if verbose:
                log("> Trying [freq_Hz, q_Jy, u_Jy,  dq_Jy, du_Jy]", end=' ')
            (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = data
            if verbose: log("... success.")
            noStokesI = True
        except Exception:
            if verbose: log("...failed.")
            if debug:
                log(traceback.format_exc())
            sys.exit()
    if verbose: log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        log("Warn: no Stokes I data in use.")
        IArr_Jy = np.ones_like(QArr_Jy)
        dIArr_Jy = np.zeros_like(QArr_Jy)

    # Convert to GHz and mJy for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    IArr_mJy = IArr_Jy * 1e3
    QArr_mJy = QArr_Jy * 1e3
    UArr_mJy = UArr_Jy * 1e3
    dIArr_mJy = dIArr_Jy * 1e3
    dQArr_mJy = dQArr_Jy * 1e3
    dUArr_mJy = dUArr_Jy * 1e3
    dQUArr_mJy = (dQArr_mJy + dUArr_mJy) / 2.0
    dQUArr_Jy = dQUArr_mJy / 1e3

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr_mJy,
                                 QArr     = QArr_mJy,
                                 UArr     = UArr_mJy,
                                 dIArr    = dIArr_mJy,
                                 dQArr    = dQArr_mJy,
                                 dUArr    = dUArr_mJy,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if showPlots:
        if verbose: log("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr_mJy=IArr_mJy,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr_mJy=dIArr_mJy,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr_mJy=IModHirArr_mJy,
                              fig=specFig)

        # Use the custom navigation toolbar (does not work on Mac OS X)
        #        try:
        #            specFig.canvas.toolbar.pack_forget()
        #            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        #        except Exception:
        #            pass

        # Display the figure
        #        if not plt.isinteractive():
        #            specFig.show()

        # DEBUG (plot the Q, U and average RMS spectrum)
        if debug:
            rmsFig = plt.figure(figsize=(12.0, 8))
            ax = rmsFig.add_subplot(111)
            ax.plot(freqArr_Hz / 1e9,
                    dQUArr_mJy,
                    marker='o',
                    color='k',
                    lw=0.5,
                    label='rms <QU>')
            ax.plot(freqArr_Hz / 1e9,
                    dQArr_mJy,
                    marker='o',
                    color='b',
                    lw=0.5,
                    label='rms Q')
            ax.plot(freqArr_Hz / 1e9,
                    dUArr_mJy,
                    marker='o',
                    color='r',
                    lw=0.5,
                    label='rms U')
            xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
            ax.set_xlim(
                np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
                np.max(freqArr_Hz) / 1e9 + xRange * 0.05)
            ax.set_xlabel('$\\nu$ (GHz)')
            ax.set_ylabel('RMS (mJy bm$^{-1}$)')
            ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
            # rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose:
        log("PhiArr = %.2f to %.2f by %.2f (%d chans)." %
            (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr_mJy, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr,
                                            dataU=uArr,
                                            lambdaSqArr_m2=lambdaSqArr_m2,
                                            phiArr_radm2=phiArr_radm2,
                                            weightArr=weightArr,
                                            nBits=nBits,
                                            verbose=True,
                                            log=log)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = ~np.isfinite(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = True,
                        log             = log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz / 1e9)
    dirtyFDF *= (Ifreq0_mJybm / 1e3)  # FDF is in Jy

    # Calculate the theoretical noise in the FDF. NB: the old formula
    # (commented out below) only works for variance weights.
    #dFDFth_Jybm = np.sqrt(1./np.sum(1./dQUArr_Jy**2.))
    dFDFth_Jybm = np.sqrt(
        np.sum(weightArr**2 * dQUArr_Jy**2) / (np.sum(weightArr))**2)

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth_Jybm,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy))
    mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm)
    if mDict['phiPeakPIfit_rm2'] is None:
        log('Peak is at the edge of the RM spectrum! Peak fitting failed!\n')
        log('Rerunning with phiMax twice as large.')
        # The following code re-runs everything with a larger phiMax, then
        # overwrites the appropriate variables so the run can continue without
        # interruption.
        mDict, aDict = run_rmsynth(data=data,
                                   polyOrd=polyOrd,
                                   phiMax_radm2=phiMax_radm2 * 2,
                                   dPhi_radm2=dPhi_radm2,
                                   nSamples=nSamples,
                                   weightType=weightType,
                                   fitRMSF=fitRMSF,
                                   noStokesI=noStokesI,
                                   nBits=nBits,
                                   showPlots=False,
                                   debug=debug,
                                   verbose=verbose)
        phiArr_radm2 = aDict["phiArr_radm2"]
        phi2Arr_radm2 = aDict["phi2Arr_radm2"]
        RMSFArr = aDict["RMSFArr"]
        freqArr_Hz = aDict["freqArr_Hz"]
        weightArr = aDict["weightArr"]
        dirtyFDF = aDict["dirtyFDF"]

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"] / (Ifreq0_mJybm / 1e3)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    #add array dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
        # Print the results to the screen
        log()
        log('-' * 80)
        log('RESULTS:\n')
        log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))

        log('Pol Angle = %.4g (+/-%.4g) deg' %
            (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]))
        log('Pol Angle 0 = %.4g (+/-%.4g) deg' %
            (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]))
        log('Peak FD = %.4g (+/-%.4g) rad/m^2' %
            (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]))
        log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9))
        log('I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"]))
        log('Peak PI = %.4g (+/-%.4g) mJy/beam' %
            (mDict["ampPeakPIfit_Jybm"] * 1e3,
             mDict["dAmpPeakPIfit_Jybm"] * 1e3))
        log('QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"] * 1e3))
        log('FDF Noise (theory)   = %.4g mJy/beam' %
            (mDict["dFDFth_Jybm"] * 1e3))
        log('FDF Noise (Corrected MAD) = %.4g mJy/beam' %
            (mDict["dFDFcorMAD_Jybm"] * 1e3))
        log('FDF Noise (rms)   = %.4g mJy/beam' %
            (mDict["dFDFrms_Jybm"] * 1e3))
        log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
        log('sigma_add(q) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"],
             mDict["dSigmaAddMinusQ"]))
        log('sigma_add(u) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"],
             mDict["dSigmaAddMinusU"]))
        log()
        log('-' * 80)

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig)

        # Use the custom navigation toolbar
        # try:
        #     fdfFig.canvas.toolbar.pack_forget()
        #     CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        # except Exception:
        #     pass

        # Display the figure
        # fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        plt.show()
        # if verbose: print "Press <RETURN> to exit ...",
        # input()

    return mDict, aDict
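
As with the QU-fitting routines, a short usage sketch may help; the synthetic spectrum and the chosen RM of 30 rad/m^2 are illustrative assumptions, and only keys that appear in the listing above are read from the returned mDict.

# Toy input: 1 Jy flat-spectrum source, 10% polarised, RM ~ 30 rad/m^2, 1-2 GHz
import numpy as np

freq_Hz = np.linspace(1.0e9, 2.0e9, 300)
lam2 = (2.998e8 / freq_Hz) ** 2
I_Jy = np.ones_like(freq_Hz)
Q_Jy = 0.1 * np.cos(2.0 * 30.0 * lam2)
U_Jy = 0.1 * np.sin(2.0 * 30.0 * lam2)
dI_Jy = dQ_Jy = dU_Jy = np.full_like(freq_Hz, 0.01)

data = [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]
mDict, aDict = run_rmsynth(data, polyOrd=2, weightType="variance", verbose=True)

# Peak Faraday depth and its uncertainty, as stored in mDict above
print(mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"])
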
Example #7
def run_qufit(
    data,
    modelNum,
    polyOrd=2,
    nBits=32,
    noStokesI=False,
    showPlots=False,
    debug=False,
    verbose=False,
    sampler="dynesty",
    fit_function="log",
    ncores=1,
    nlive=1000,
    prefixOut="prefixOut",
):
    """Carry out QU-fitting using the supplied parameters:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u,  dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each channel.
                dq (array_like): Error in fractional Stokes Q intensity in each channel.
                du (array_like): Error in fractional Stokes U intensity in each channel.
        modelNum (int, required): number of model to be fit to data. Models and
             priors are specified as Python code in files called 'mX.py' within
            the 'models_ns' directory.
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum (used to
            normalize Q and U values). Defaults to 2 (quadratic).
        nBits (int): number of bits to use in internal calculations.
        noStokesI (bool): set True if the Stokes I spectrum should be ignored.
        showPlots (bool): Set True if the spectrum and parameter-space plots
            should be displayed.
        debug (bool): Display debug messages.
        verbose (bool): Print verbose messages/results to terminal.
        sampler (str): Name of the sampler passed to bilby. Defaults to
            'dynesty'.
        fit_function (str): Functional form used for the Stokes I spectrum fit.
        ncores (int): Number of cores used for parallel sampling (passed to
            bilby as npool).
        nlive (int): Number of live points used by the nested sampler.
        prefixOut (str): Prefix used for the output file and directory names.

        Returns: nothing. Results saved to files and/or printed to terminal."""
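    # Illustrative call (an assumption added for orientation, not part of the
    # original code): with the seven arrays already loaded, one might run e.g.
    #   run_qufit(data=[freq_Hz, I, Q, U, dI, dQ, dU], modelNum=1,
    #             sampler="dynesty", nlive=500, prefixOut="source1")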

    # Output prefix is derived from the input file name
    nestOut = f"{prefixOut}_m{modelNum}_{sampler}/"

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
            print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Calculate the lambda-squared values from the channel frequencies
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    dataArr = create_frac_spectra(
        freqArr=freqArr_Hz,
        IArr=IArr,
        QArr=QArr,
        UArr=UArr,
        dIArr=dIArr,
        dQArr=dQArr,
        dUArr=dUArr,
        polyOrd=polyOrd,
        verbose=True,
        fit_function=fit_function,
    )
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    print("Plotting the input data and spectral index fit.")
    freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
    IModHirArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
    specFig = plt.figure(facecolor="w", figsize=(10, 6))
    plot_Ipqu_spectra_fig(
        freqArr_Hz=freqArr_Hz,
        IArr=IArr,
        qArr=qArr,
        uArr=uArr,
        dIArr=dIArr,
        dqArr=dqArr,
        duArr=duArr,
        freqHirArr_Hz=freqHirArr_Hz,
        IModArr=IModHirArr,
        fig=specFig,
    )

    # Use the custom navigation toolbar
    try:
        specFig.canvas.toolbar.pack_forget()
        CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
    except Exception:
        pass

    # Display the figure
    if showPlots:
        specFig.canvas.draw()
        specFig.show()

    # -------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    mod = load_model(modelNum, verbose=True)

    model = mod.model

    # Let's time the sampler
    startTime = time.time()

    parNames = []
    priorTypes = []
    labels = []
    bounds = []
    wraps = []
    for key, prior in mod.priors.items():
        if prior.__class__.__name__ == "Constraint":
            continue
        parNames.append(key)
        priorTypes.append(prior.__class__.__name__)
        labels.append(prior.latex_label)
        bounds.append([prior.minimum, prior.maximum])
        wraps.append(prior.boundary)
    nDim = len(parNames)
    fixedMsk = [0 if x == "DeltaFunction" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    priors = mod.priors

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr,
                         modelNum)
    # Let's time the sampler
    startTime = time.time()

    result = bilby.run_sampler(
        likelihood=lnlike,
        priors=priors,
        sampler=sampler,
        nlive=nlive,
        npool=ncores,
        outdir=nestOut,
        label="m%d" % modelNum,
        plot=True,
    )

    # Do the post-processing on one processor
    endTime = time.time()
    # Best guess here - taking the maximum likelihood value
    lnLike = np.max(result.log_likelihood_evaluations)
    lnEvidence = result.log_evidence
    dLnEvidence = result.log_evidence_err

    # Get the best-fitting values & uncertainties

    p = [None] * nDim
    errPlus = [None] * nDim
    errMinus = [None] * nDim
    # g = lambda v: (v[1], v[2]-v[1], v[1]-v[0])
    for i in range(nDim):
        summary = result.get_one_dimensional_median_and_error_bar(parNames[i])
        # Get stats around modal value
        p[i], errPlus[i], errMinus[i] = (
            summary.median,
            summary.plus,
            summary.minus,
        )

    # Calculate goodness-of-fit parameters
    nData = 2.0 * len(lamSqArr_m2)
    dof = nData - nFree - 1
    chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr,
                        model)
    chiSqRed = chiSq / dof
    AIC = 2.0 * nFree - 2.0 * lnLike
    AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
    BIC = nFree * np.log(nData) - 2.0 * lnLike

    # Summary of run
    print("")
    print("-" * 80)
    print("SUMMARY OF SAMPLING RUN:")
    print("#-PROCESSORS  = %d" % ncores)
    print("RUN-TIME      = %.2f" % (endTime - startTime))
    print("DOF           = %d" % dof)
    print("CHISQ:        = %.3g" % chiSq)
    print("CHISQ RED     = %.3g" % chiSqRed)
    print("AIC:          = %.3g" % AIC)
    print("AICc          = %.3g" % AICc)
    print("BIC           = %.3g" % BIC)
    print("ln(EVIDENCE)  = %.3g" % lnEvidence)
    print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
    print("")
    print("-" * 80)
    print("RESULTS:\n")
    for i in range(len(p)):
        print("%s = %.4g (+%3g, -%3g)" %
              (parNames[i], p[i], errPlus[i], errMinus[i]))
    print("-" * 80)
    print("")

    # Create a save dictionary and store final p in values
    outFile = f"{prefixOut}_m{modelNum}_{sampler}.json"
    IfitDict["p"] = toscalar(IfitDict["p"].tolist())
    saveDict = {
        "parNames": toscalar(parNames),
        "labels": toscalar(labels),
        "values": toscalar(p),
        "errPlus": toscalar(errPlus),
        "errMinus": toscalar(errMinus),
        "bounds": toscalar(bounds),
        "priorTypes": toscalar(priorTypes),
        "wraps": toscalar(wraps),
        "dof": toscalar(dof),
        "chiSq": toscalar(chiSq),
        "chiSqRed": toscalar(chiSqRed),
        "AIC": toscalar(AIC),
        "AICc": toscalar(AICc),
        "BIC": toscalar(BIC),
        "ln(EVIDENCE) ": toscalar(lnEvidence),
        "dLn(EVIDENCE)": toscalar(dLnEvidence),
        "nFree": toscalar(nFree),
        "Imodel": toscalar(",".join([str(x) for x in IfitDict["p"]])),
        "Imodel_errs":
        toscalar(",".join([str(x) for x in IfitDict["perror"]])),
        "IfitChiSq": toscalar(IfitDict["chiSq"]),
        "IfitChiSqRed": toscalar(IfitDict["chiSqRed"]),
        "IfitPolyOrd": toscalar(IfitDict["polyOrd"]),
        "Ifitfreq0": toscalar(IfitDict["reference_frequency_Hz"]),
    }
    json.dump(saveDict, open(outFile, "w"))
    outFile = f"{prefixOut}_m{modelNum}_{sampler}.dat"
    FH = open(outFile, "w")
    for k, v in saveDict.items():
        FH.write("%s=%s\n" % (k, v))
    FH.close()
    print("Results saved in JSON and .dat format to:\n '%s'\n" % outFile)

    # Plot the posterior samples in a corner plot
    # chains =  aObj.get_equal_weighted_posterior()
    # chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
    # iFixed = [i for i, e in enumerate(fixedMsk) if e==0]
    # chains = np.delete(chains, iFixed, 1)
    # for i in sorted(iFixed, reverse=True):
    #     del(labels[i])
    #     del(p[i])

    cornerFig = result.plot_corner()

    # Save the posterior chains to ASCII file

    # Plot the data and best-fitting model
    lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
    freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
    IModArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
    pDict = {k: v for k, v in zip(parNames, p)}
    quModArr = model(pDict, lamSqHirArr_m2)
    model_dict = {
        "model": model,
        "parNames": parNames,
        "posterior": result.posterior,
    }
    specFig.clf()
    plot_Ipqu_spectra_fig(
        freqArr_Hz=freqArr_Hz,
        IArr=IArr,
        qArr=qArr,
        uArr=uArr,
        dIArr=dIArr,
        dqArr=dqArr,
        duArr=duArr,
        freqHirArr_Hz=freqHirArr_Hz,
        IModArr=IModArr,
        qModArr=quModArr.real,
        uModArr=quModArr.imag,
        model_dict=model_dict,
        fig=specFig,
    )
    specFig.canvas.draw()

    # Save the figures
    outFile = prefixOut + "fig_m%d_specfit.pdf" % modelNum
    specFig.set_canvas(specFig.canvas)
    specFig.figure.savefig(outFile)
    print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
    outFile = prefixOut + "fig_m%d_corner.pdf" % modelNum
    cornerFig.set_canvas(cornerFig.canvas)
    cornerFig.savefig(outFile)
    print("Plot of posterior samples saved to \n '%s'\n" % outFile)

    # Display the figures
    if showPlots:
        specFig.figure.show()
        cornerFig.show()