Example #1
def run_qufit(dataFile,
              modelNum,
              outDir="",
              polyOrd=3,
              nBits=32,
              noStokesI=False,
              showPlots=False,
              debug=False,
              verbose=False):
    """Function controlling the fitting procedure."""

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)
    nestOut = prefixOut + "_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut):
            shutil.rmtree(nestOut, True)
        os.mkdir(nestOut)
    if mpiSwitch:
        mpiComm.Barrier()

    # Read the data file in the root process
    if mpiRank == 0:
        dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr
            if mpiRank == 0:
                print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            if mpiSwitch:
                MPI.Finalize()
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if mpiRank == 0:
            print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        dataArr = create_frac_spectra(freqArr=freqArr_GHz,
                                      IArr=IArr,
                                      QArr=QArr,
                                      UArr=UArr,
                                      dIArr=dIArr,
                                      dQArr=dQArr,
                                      dUArr=dUArr,
                                      polyOrd=polyOrd,
                                      verbose=True)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    if mpiRank == 0:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(10, 6))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        if showPlots:
            specFig.canvas.draw()
            specFig.show()

    #-------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:
        print("\nLoading the model from 'models_ns/m%d.py' ..." % modelNum)
    mod = imp.load_source("m%d" % modelNum, "models_ns/m%d.py" % modelNum)
    global model
    model = mod.model

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Unpack the inParms structure
    parNames = [x["parname"] for x in mod.inParms]
    labels = [x["label"] for x in mod.inParms]
    values = [x["value"] for x in mod.inParms]
    bounds = [x["bounds"] for x in mod.inParms]
    priorTypes = [x["priortype"] for x in mod.inParms]
    wraps = [x["wrap"] for x in mod.inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr)
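    # (prior and lnlike are closures handed to PyMultiNest below; in the
    #  PyMultiNest convention the prior maps samples from the unit hypercube
    #  onto the parameter ranges and the log-likelihood scores the q/u data
    #  for a given set of model parameters.)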

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    pmn.run(**nestArgsDict)

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyser object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyser methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]['median']
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
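        # (g() converts (lower, median, upper) percentiles into
        #  (median, +error, -error); the 15.72/84.27 levels bracket roughly
        #  the central 68 per cent of the posterior, i.e. ~1-sigma errors.)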
        for i in range(nDim):
            p[i], errPlus[i], errMinus[i] = \
                        g(np.percentile(chains[:, i], [15.72, 50, 84.27]))

        # Calculate goodness-of-fit parameters
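        # (nData counts the q and u points separately; AIC = 2k - 2lnL and
        #  BIC = k*ln(n) - 2lnL, with k = nFree free parameters.)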
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print("%s = %.4g (+%3g, -%3g)" % \
                  (parNames[i], p[i], errPlus[i], errMinus[i]))
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        outFile = prefixOut + "_m%d_nest.json" % modelNum
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {
            "parNames": toscalar(parNames),
            "labels": toscalar(labels),
            "values": toscalar(p),
            "errPlus": toscalar(errPlus),
            "errMinus": toscalar(errMinus),
            "bounds": toscalar(bounds),
            "priorTypes": toscalar(priorTypes),
            "wraps": toscalar(wraps),
            "dof": toscalar(dof),
            "chiSq": toscalar(chiSq),
            "chiSqRed": toscalar(chiSqRed),
            "AIC": toscalar(AIC),
            "AICc": toscalar(AICc),
            "BIC": toscalar(BIC),
            "IfitDict": IfitDict
        }
        json.dump(saveDict, open(outFile, "w"))
        print("Results saved in JSON format to:\n '%s'\n" % outFile)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        pDict = {k: v for k, v in zip(parNames, p)}
        quModArr = model(pDict, lamSqHirArr_m2)
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModArr,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              fig=specFig)
        specFig.canvas.draw()

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del (labels[i])
            del (p[i])
        cornerFig = corner.corner(xs=chains,
                                  labels=labels,
                                  range=[0.99999] * nFree,
                                  truths=p,
                                  quantiles=[0.1572, 0.8427],
                                  bins=30)

        # Save the figures
        outFile = nestOut + "fig_m%d_specfit.pdf" % modelNum
        specFig.savefig(outFile)
        print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
        outFile = nestOut + "fig_m%d_corner.pdf" % modelNum
        cornerFig.savefig(outFile)
        print("Plot of posterior samples saved to \n '%s'\n" % outFile)

        # Display the figures
        if showPlots:
            specFig.show()
            cornerFig.show()
            print("> Press <RETURN> to exit ...", end="")
            sys.stdout.flush()
            input()

        # Clean up
        plt.close(specFig)
        plt.close(cornerFig)

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
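A minimal usage sketch for this variant (illustrative only, not part of the original listing): it assumes a whitespace-delimited ASCII file 'source1.dat' with columns [freq_Hz, I, Q, U, dI, dQ, dU], a model definition in 'models_ns/m1.py' relative to the working directory, and the module-level imports and globals (mpiSwitch, C, pmn, etc.) defined elsewhere in the script.

if __name__ == "__main__":
    # Fit hypothetical model 1 with a cubic Stokes I fit; run under MPI
    # (e.g. "mpiexec -n 4 python script.py") or serially if mpiSwitch is False.
    run_qufit(dataFile="source1.dat", modelNum=1, polyOrd=3, showPlots=False)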
Example #2
def run_rmsynth(data,
                polyOrd=3,
                phiMax_radm2=None,
                dPhi_radm2=None,
                nSamples=10.0,
                weightType="variance",
                fitRMSF=False,
                noStokesI=False,
                phiNoise_radm2=1e6,
                nBits=32,
                showPlots=False,
                debug=False,
                verbose=False,
                log=print,
                units='Jy/beam',
                e_num=1):
    """Run RM synthesis on 1D data.

    Args:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u,  dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each channel.
                dq (array_like): Error in fractional Stokes Q intensity in each channel.
                du (array_like): Error in fractional Stokes U intensity in each channel.

    Kwargs:
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
        phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
        dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
        nSamples (float): Number of samples across the RMSF.
        weightType (str): Can be "variance" or "uniform"
            "variance" -- Weight by uncertainty in Q and U.
            "uniform" -- Weight uniformly (i.e. with 1s)
        fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Is Stokes I data provided?
        phiNoise_radm2 (float): ????
        nBits (int): Precision of floating point numbers.
        showPlots (bool): Show plots?
        debug (bool): Turn on debugging messages & plots?
        verbose (bool): Verbosity.
        log (function): Which logging function to use.
        units (str): Units of data.

    Returns:
        mDict (dict): Summary of RM synthesis results.
        aDict (dict): Data output by RM synthesis.

    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        if verbose: log("... success.")
    except Exception:
        if verbose: log("...failed.")
        # freq_Hz, q, u, dq, du
        try:
            if verbose: log("> Trying [freq_Hz, q, u,  dq, du]", end=' ')
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
            if verbose: log("... success.")
            noStokesI = True
        except Exception:
            if verbose: log("...failed.")
            if debug:
                log(traceback.format_exc())
            sys.exit()
    if verbose: log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if verbose: log("Warn: no Stokes I data in use.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    dQUArr = (dQArr + dUArr) / 2.0

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr,
                                 QArr     = QArr,
                                 UArr     = UArr,
                                 dIArr    = dIArr,
                                 dQArr    = dQArr,
                                 dUArr    = dUArr,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if showPlots:
        if verbose: log("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig,
                              units=units)

        # Use the custom navigation toolbar (does not work on Mac OS X)
        #        try:
        #            specFig.canvas.toolbar.pack_forget()
        #            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        #        except Exception:
        #            pass

        # Display the figure
        #        if not plt.isinteractive():
        #            specFig.show()

        # DEBUG (plot the Q, U and average RMS spectrum)
        if debug:
            rmsFig = plt.figure(figsize=(12.0, 8))
            ax = rmsFig.add_subplot(111)
            ax.plot(freqArr_Hz / 1e9,
                    dQUArr,
                    marker='o',
                    color='k',
                    lw=0.5,
                    label='rms <QU>')
            ax.plot(freqArr_Hz / 1e9,
                    dQArr,
                    marker='o',
                    color='b',
                    lw=0.5,
                    label='rms Q')
            ax.plot(freqArr_Hz / 1e9,
                    dUArr,
                    marker='o',
                    color='r',
                    lw=0.5,
                    label='rms U')
            xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
            ax.set_xlim(
                np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
                np.max(freqArr_Hz) / 1e9 + xRange * 0.05)
            ax.set_xlabel('$\\nu$ (GHz)')
            ax.set_ylabel('RMS ' + units)
            ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
            # rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
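    # (Standard RM-synthesis resolution relations, e.g. Brentjens & de Bruyn
    #  2005: RMSF FWHM ~ 2*sqrt(3)/Delta(lambda^2), and the maximum reliably
    #  sampled |phi| ~ sqrt(3)/(the lambda^2 channel width).)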
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose:
        log("PhiArr = %.2f to %.2f by %.2f (%d chans)." %
            (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2, mylist = do_rmsynth_planes(
        dataQ=qArr,
        dataU=uArr,
        lambdaSqArr_m2=lambdaSqArr_m2,
        phiArr_radm2=phiArr_radm2,
        weightArr=weightArr,
        nBits=nBits,
        verbose=verbose,
        log=log,
        e_num=e_num)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = ~np.isfinite(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = verbose,
                        log             = log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0 = poly5(fitDict["p"])(freq0_Hz / 1e9)
    dirtyFDF *= Ifreq0  # FDF is in fracpol units initially; convert back to flux

    # Calculate the theoretical noise in the FDF.
    # NOTE: the old formula is only valid for variance weights.
    weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
    dFDFth = np.sqrt(
        np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) /
        (np.sum(weightArr))**2)
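    # (dFDFth is the propagated uncertainty of the weighted mean of the
    #  complex channels, sqrt(sum(w_i^2 sigma_i^2))/sum(w_i), which reduces
    #  to the inverse-variance formula when w_i = 1/sigma_i^2.)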

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0"] = toscalar(Ifreq0)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU"] = toscalar(nanmedian(dQUArr))
    mDict["dFDFth"] = toscalar(dFDFth)
    mDict["units"] = units
    mDict['dQUArr'] = dQUArr

    if fitDict["fitStatus"] >= 128:
        log("WARNING: Stokes I model contains negative values!")
    elif fitDict["fitStatus"] >= 64:
        log("Caution: Stokes I model has low signal-to-noise.")

    # Add information on the nature of the channels:
    good_channels = np.where(np.logical_and(weightArr != 0,
                                            np.isfinite(qArr)))[0]
    mDict["min_freq"] = float(np.min(freqArr_Hz[good_channels]))
    mDict["max_freq"] = float(np.max(freqArr_Hz[good_channels]))
    mDict["N_channels"] = good_channels.size
    mDict["median_channel_width"] = float(np.median(np.diff(freqArr_Hz)))

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit"] / (Ifreq0)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    # Add the array dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
        # Print the results to the screen
        log()
        log('-' * 80)
        log('RESULTS:\n')
        log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))

        log('Pol Angle = %.4g (+/-%.4g) deg' %
            (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]))
        log('Pol Angle 0 = %.4g (+/-%.4g) deg' %
            (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]))
        log('Peak FD = %.4g (+/-%.4g) rad/m^2' %
            (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]))
        log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9))
        log('I freq0 = %.4g %s' % (mDict["Ifreq0"], units))
        log('Peak PI = %.4g (+/-%.4g) %s' %
            (mDict["ampPeakPIfit"], mDict["dAmpPeakPIfit"], units))
        log('QU Noise = %.4g %s' % (mDict["dQU"], units))
        log('FDF Noise (theory)   = %.4g %s' % (mDict["dFDFth"], units))
        log('FDF Noise (Corrected MAD) = %.4g %s' %
            (mDict["dFDFcorMAD"], units))
        log('FDF Noise (rms)   = %.4g %s' % (mDict["dFDFrms"], units))
        log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
        log('sigma_add(q) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"],
             mDict["dSigmaAddMinusQ"]))
        log('sigma_add(u) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"],
             mDict["dSigmaAddMinusU"]))
        log()
        log('-' * 80)

    myfig = plotmylist(mylist)
    plt.show()
    myfig.show()

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig,
                          units=units)

        # Use the custom navigation toolbar
        # try:
        #     fdfFig.canvas.toolbar.pack_forget()
        #     CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        # except Exception:
        #     pass

        # Display the figure
        # fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        plt.show()
        # if verbose: print("> Press <RETURN> to exit ...", end="")
        # input()

    return mDict, aDict, mylist
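A minimal usage sketch for the function above (illustrative; the file name is hypothetical and the surrounding imports and helper functions are assumed to be in scope). Either of the two documented column formats can be passed in:

import numpy as np

# Columns: [freq_Hz, I, Q, U, dI, dQ, dU], or [freq_Hz, q, u, dq, du] without Stokes I.
data = np.loadtxt("source1.dat", unpack=True)
mDict, aDict, mylist = run_rmsynth(data,
                                   polyOrd=3,
                                   nSamples=10.0,
                                   weightType="variance",
                                   units="Jy/beam",
                                   verbose=True)
print(mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"])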
Example #3
def run_rmsynth(dataFile,
                polyOrd=3,
                phiMax_radm2=None,
                dPhi_radm2=None,
                nSamples=10.0,
                weightType="variance",
                fitRMSF=False,
                noStokesI=False,
                phiNoise_radm2=1e6,
                nBits=32,
                showPlots=False,
                debug=False):
    """
    Read the I, Q & U data from the ASCII file and run RM-synthesis.
    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)

    # Read the data-file. Format=space-delimited, comments="#".
    print "Reading the data file '%s':" % dataFile
    # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy
    try:
        print "> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]",
        (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy,
         dIArr_Jy, dQArr_Jy, dUArr_Jy) = \
         np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
        print "... success."
    except Exception:
        print "...failed."
        # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy
        try:
            print "> Trying [freq_Hz, q_Jy, u_Jy,  dq_Jy, du_Jy]",
            (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = \
                         np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
            print "... success."
            noStokesI = True
        except Exception:
            print "...failed."
            if debug:
                print traceback.format_exc()
            sys.exit()
    print "Successfully read in the Stokes spectra."

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        print "Warn: no Stokes I data in use."
        IArr_Jy = np.ones_like(QArr_Jy)
        dIArr_Jy = np.zeros_like(QArr_Jy)

    # Convert to GHz and mJy for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    IArr_mJy = IArr_Jy * 1e3
    QArr_mJy = QArr_Jy * 1e3
    UArr_mJy = UArr_Jy * 1e3
    dIArr_mJy = dIArr_Jy * 1e3
    dQArr_mJy = dQArr_Jy * 1e3
    dUArr_mJy = dUArr_Jy * 1e3
    dQUArr_mJy = (dQArr_mJy + dUArr_mJy) / 2.0
    dQUArr_Jy = dQUArr_mJy / 1e3

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr_mJy,
                                 QArr     = QArr_mJy,
                                 UArr     = UArr_mJy,
                                 dIArr    = dIArr_mJy,
                                 dQArr    = dQArr_mJy,
                                 dUArr    = dUArr_mJy,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if showPlots:
        print "Plotting the input data and spectral index fit."
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr_mJy=IArr_mJy,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr_mJy=dIArr_mJy,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr_mJy=IModHirArr_mJy,
                              fig=specFig)

        # Use the custom navigation toolbar (does not work on Mac OS X)
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        specFig.show()

        # DEBUG (plot the Q, U and average RMS spectrum)
        if debug:
            rmsFig = plt.figure(figsize=(12.0, 8))
            ax = rmsFig.add_subplot(111)
            ax.plot(freqArr_Hz / 1e9,
                    dQUArr_mJy,
                    marker='o',
                    color='k',
                    lw=0.5,
                    label='rms <QU>')
            ax.plot(freqArr_Hz / 1e9,
                    dQArr_mJy,
                    marker='o',
                    color='b',
                    lw=0.5,
                    label='rms Q')
            ax.plot(freqArr_Hz / 1e9,
                    dUArr_mJy,
                    marker='o',
                    color='r',
                    lw=0.5,
                    label='rms U')
            xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
            ax.set_xlim(
                np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
                np.max(freqArr_Hz) / 1e9 + xRange * 0.05)
            ax.set_xlabel('$\\nu$ (GHz)')
            ax.set_ylabel('RMS (mJy bm$^{-1}$)')
            ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
            rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    print "PhiArr = %.2f to %.2f by %.2f (%d chans)." % (
        phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)

    # Calculate the weighting as 1/sigma^2 or all 1s (natural)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr_mJy, 2.0)
    else:
        weightType = "natural"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    print "Weight type is '%s'." % weightType

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr,
                                            dataU=uArr,
                                            lambdaSqArr_m2=lambdaSqArr_m2,
                                            phiArr_radm2=phiArr_radm2,
                                            weightArr=weightArr,
                                            nBits=nBits,
                                            verbose=True)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = np.isnan(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = True)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    print "> RM-synthesis completed in %.2f seconds." % cputime

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz / 1e9)
    dirtyFDF *= (Ifreq0_mJybm / 1e3)  # FDF is in Jy

    # Calculate the theoretical noise in the FDF
    dFDFth_Jybm = np.sqrt(1. / np.sum(1. / dQUArr_Jy**2.))
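    # (Inverse-variance noise estimate; strictly valid only for the default
    #  "variance" weighting, i.e. weightArr = 1/sigma^2.)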

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth_Jybm,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy))
    mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm)

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"] / (Ifreq0_mJybm / 1e3)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    # Save the  dirty FDF, RMSF and weight array to ASCII files
    print "Saving the dirty FDF, RMSF weight arrays to ASCII files."
    outFile = prefixOut + "_FDFdirty.dat"
    print "> %s" % outFile
    np.savetxt(outFile, zip(phiArr_radm2, dirtyFDF.real, dirtyFDF.imag))
    outFile = prefixOut + "_RMSF.dat"
    print "> %s" % outFile
    np.savetxt(outFile, zip(phi2Arr_radm2, RMSFArr.real, RMSFArr.imag))
    outFile = prefixOut + "_weight.dat"
    print "> %s" % outFile
    np.savetxt(outFile, zip(freqArr_Hz, weightArr))

    # Save the measurements to a "key=value" text file
    print "Saving the measurements on the FDF in 'key=val' and JSON formats."
    outFile = prefixOut + "_RMsynth.dat"
    print "> %s" % outFile
    FH = open(outFile, "w")
    for k, v in mDict.iteritems():
        FH.write("%s=%s\n" % (k, v))
    FH.close()
    outFile = prefixOut + "_RMsynth.json"
    print "> %s" % outFile
    json.dump(dict(mDict), open(outFile, "w"))

    # Print the results to the screen
    print
    print '-' * 80
    print 'RESULTS:\n'
    print 'FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"])

    print 'Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
                                              mDict["dPolAngleFit_deg"])
    print 'Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
                                                mDict["dPolAngle0Fit_deg"])
    print 'Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
                                                mDict["dPhiPeakPIfit_rm2"])
    print 'freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9)
    print 'I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"])
    print 'Peak PI = %.4g (+/-%.4g) mJy/beam' % (
        mDict["ampPeakPIfit_Jybm"] * 1e3, mDict["dAmpPeakPIfit_Jybm"] * 1e3)
    print 'QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"] * 1e3)
    print 'FDF Noise (theory)   = %.4g mJy/beam' % (mDict["dFDFth_Jybm"] * 1e3)
    print 'FDF SNR = %.4g ' % (mDict["snrPIfit"])
    print 'sigma_add(q) = %.4g (+%.4g, -%.4g)' % (
        mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"], mDict["dSigmaAddMinusQ"])
    print 'sigma_add(u) = %.4g (+%.4g, -%.4g)' % (
        mDict["sigmaAddU"], mDict["dSigmaAddPlusU"], mDict["dSigmaAddMinusU"])
    print
    print '-' * 80

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig)

        # Use the custom navigation toolbar
        try:
            fdfFig.canvas.toolbar.pack_forget()
            CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        print "Press <RETURN> to exit ...",
        raw_input()
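A minimal invocation of this older (Python 2) variant, which reads the ASCII file itself and writes its outputs next to the input file (the file name is hypothetical):

# Expects columns [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy] or
# [freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy].
run_rmsynth("source1.dat", polyOrd=3, weightType="variance", showPlots=False)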
Example #4
def run_qufit(dataFile,
              modelNum,
              outDir="",
              polyOrd=2,
              nBits=32,
              noStokesI=False,
              showPlots=False,
              sigma_clip=5,
              debug=False,
              verbose=False,
              restart=True,
              fit_function='log'):
    """Carry out QU-fitting using the supplied parameters:
        dataFile (str, required): relative or absolute path of file containing 
            frequencies and Stokes parameters with errors.
        modelNum (int, required): number of model to be fit to data. Models and
             priors are specified as Python code in files called 'mX.py' within  
            the 'models_ns' directory.
        outDir (str): relative or absolute path to save outputs to. Defaults to
            working directory.
        polyOrd (int): Order of polynomial to fit to Stokes I spectrum (used to
            normalize Q and U values). Defaults to 3 (cubic).
        nBits (int): number of bits to use in internal calculations.
        noStokesI (bool): set True if the Stokes I spectrum should be ignored.
        showPlots (bool): Set true if the spectrum and parameter space plots
            should be displayed.
        sigma_clip (float): How many standard deviations to clip around the 
            mean of each mode in the parameter postierors.
        debug (bool): Display debug messages.
        verbose (bool): Print verbose messages/results to terminal.
        
        Returns: nothing. Results saved to files and/or printed to terminal."""

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)
    nestOut = f"{prefixOut}_m{modelNum}_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut) and restart:
            shutil.rmtree(nestOut, True)
            os.mkdir(nestOut)
        elif not os.path.exists(nestOut) and restart:
            os.mkdir(nestOut)
        elif not os.path.exists(nestOut) and not restart:
            print("Restart requested, but previous run not found!")
            raise Exception(f"{nestOut} does not exist.")
    if mpiSwitch:
        mpiComm.Barrier()

    # Read the data file in the root process
    if mpiRank == 0:
        dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr
            if mpiRank == 0:
                print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            if mpiSwitch:
                MPI.Finalize()
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if mpiRank == 0:
            print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Calculate the lambda-squared values from the channel frequencies
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        dataArr = create_frac_spectra(freqArr=freqArr_Hz,
                                      IArr=IArr,
                                      QArr=QArr,
                                      UArr=UArr,
                                      dIArr=dIArr,
                                      dQArr=dQArr,
                                      dUArr=dUArr,
                                      polyOrd=polyOrd,
                                      verbose=True,
                                      fit_function=fit_function)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    if mpiRank == 0:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
        specFig = plt.figure(facecolor='w', figsize=(10, 6))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr,
                              fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        if showPlots:
            specFig.canvas.draw()
            specFig.show()

    #-------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:
        print("\nLoading the model from 'models_ns/m%d.py' ..." % modelNum)
    #First check the working directory for a model. Failing that, try the install directory.
    try:
        spec = importlib.util.spec_from_file_location(
            "m%d" % modelNum, "models_ns/m%d.py" % modelNum)
        mod = importlib.util.module_from_spec(spec)
        sys.modules[spec.name] = mod
        spec.loader.exec_module(mod)
    except FileNotFoundError:
        try:
            RMtools_dir = os.path.dirname(
                importlib.util.find_spec('RMtools_1D').origin)
            spec = importlib.util.spec_from_file_location(
                "m%d" % modelNum, RMtools_dir + "/models_ns/m%d.py" % modelNum)
            mod = importlib.util.module_from_spec(spec)
            sys.modules[spec.name] = mod
            spec.loader.exec_module(mod)
        except Exception:
            print(
                'Model could not be found! Please make sure model is present either in {}/models_ns/, or in {}/RMtools_1D/models_ns/'
                .format(os.getcwd(), RMtools_dir))
            sys.exit()

    global model
    model = mod.model

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Unpack the inParms structure
    parNames = [x["parname"] for x in mod.inParms]
    labels = [x["label"] for x in mod.inParms]
    values = [x["value"] for x in mod.inParms]
    bounds = [x["bounds"] for x in mod.inParms]
    priorTypes = [x["priortype"] for x in mod.inParms]
    wraps = [x["wrap"] for x in mod.inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr)

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    # Look for multiple modes
    nestArgsDict['multimodal'] = True
    nestArgsDict['n_clustering_params'] = nDim
    nestArgsDict['verbose'] = False
    pmn.run(**nestArgsDict)
    # Save parnames for use with PyMultinest tools
    json.dump(parNames, open(f'{nestOut}/params.json', 'w'))

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyser object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyser methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]['median']
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        # Find the mode with the highest evidence
        mode_evidence = [
            mode['strictly local log-evidence'] for mode in statDict['modes']
        ]
        # Get the max and std for modal value
        modes = np.array(
            statDict['modes'][np.argmax(mode_evidence)]['maximum a posterior'])
        sigmas = np.array(statDict['modes'][np.argmax(mode_evidence)]['sigma'])
        upper = modes + sigma_clip * sigmas
        lower = modes - sigma_clip * sigmas
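        # (Samples outside modal value +/- sigma_clip*sigma are masked out
        #  below, so the quoted percentiles describe only the highest-evidence
        #  mode of a multimodal posterior.)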

        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
        for i in range(nDim):
            # Get stats around modal value
            idx = (chains[:, i] > lower[i]) & (chains[:, i] < upper[i])
            p[i], errPlus[i], errMinus[i] = \
                        g(np.percentile(chains[idx, i], [15.72, 50, 84.27]))

        # Calculate goodness-of-fit parameters
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print("%s = %.4g (+%3g, -%3g)" % \
                  (parNames[i], p[i], errPlus[i], errMinus[i]))
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        outFile = prefixOut + "_m%d_nest.json" % modelNum
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {
            "parNames": toscalar(parNames),
            "labels": toscalar(labels),
            "values": toscalar(p),
            "errPlus": toscalar(errPlus),
            "errMinus": toscalar(errMinus),
            "bounds": toscalar(bounds),
            "priorTypes": toscalar(priorTypes),
            "wraps": toscalar(wraps),
            "dof": toscalar(dof),
            "chiSq": toscalar(chiSq),
            "chiSqRed": toscalar(chiSqRed),
            "AIC": toscalar(AIC),
            "AICc": toscalar(AICc),
            "BIC": toscalar(BIC),
            "ln(EVIDENCE) ": toscalar(lnEvidence),
            "dLn(EVIDENCE)": toscalar(dLnEvidence),
            "nFree": toscalar(nFree),
            "Imodel": toscalar(IfitDict["p"]),
            "IfitChiSq": toscalar(IfitDict["chiSq"]),
            "IfitChiSqRed": toscalar(IfitDict["chiSqRed"]),
            "IfitPolyOrd": toscalar(IfitDict["polyOrd"]),
            "Ifitfreq0": toscalar(IfitDict["reference_frequency_Hz"])
        }
        json.dump(saveDict, open(outFile, "w"))
        outFile = prefixOut + "_m%d_nest.dat" % modelNum
        FH = open(outFile, "w")
        for k, v in saveDict.items():
            FH.write("%s=%s\n" % (k, v))
        FH.close()
        print("Results saved in JSON and .dat format to:\n '%s'\n" % outFile)

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del (labels[i])
            del (p[i])

        cornerFig = corner.corner(xs=chains,
                                  labels=labels,
                                  range=[(l, u) for l, u in zip(lower, upper)],
                                  truths=p,
                                  quantiles=[0.1572, 0.8427],
                                  bins=30)

        # Save the posterior chains to ASCII file
        if verbose: print("Saving the posterior chains to ASCII file.")
        outFile = prefixOut + "_m%d_posteriorChains.dat" % modelNum
        if verbose: print("> %s" % outFile)
        np.savetxt(outFile, chains)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
        pDict = {k: v for k, v in zip(parNames, p)}
        quModArr = model(pDict, lamSqHirArr_m2)
        model_dict = {
            'chains': chains,
            'model': model,
            'parNames': parNames,
            'values': values
        }
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr=dIArr,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModArr,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              model_dict=model_dict,
                              fig=specFig)
        specFig.canvas.draw()

        # Save the figures
        outFile = prefixOut + "fig_m%d_specfit.pdf" % modelNum
        specFig.savefig(outFile)
        print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
        outFile = prefixOut + "fig_m%d_corner.pdf" % modelNum
        cornerFig.savefig(outFile)
        print("Plot of posterior samples saved to \n '%s'\n" % outFile)

        # Display the figures
        if showPlots:
            plt.show()
            #cornerFig.show()

        # Clean up

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
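A minimal driver sketch for the nested-sampling run_qufit above (the module and file names here are assumptions, not part of the original listing); with MPI support enabled the script would typically be launched via mpirun, e.g. "mpirun -np 4 python driver.py":

# driver.py -- hypothetical wrapper around the nested-sampling run_qufit
from do_QUfit_1D_nest import run_qufit   # assumed module name

run_qufit(dataFile="source1.dat",  # ASCII columns: freq_Hz, I, Q, U, dI, dQ, dU
          modelNum=1,              # model definition file, e.g. models_ns/m1.py
          polyOrd=3,               # order of the Stokes I polynomial fit
          showPlots=False,
          verbose=True)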
Example no. 5
def run_qufit(dataFile,
              modelNum,
              nWalkers=200,
              nThreads=2,
              outDir="",
              polyOrd=3,
              nBits=32,
              noStokesI=False,
              showPlots=False,
              debug=False):
    """Root function controlling the fitting procedure."""

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)

    # Read the data-file. Format=space-delimited, comments='#'.
    print("Reading the data file '%s':" % dataFile)
    # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy
    try:
        print("> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]",
              end=' ')
        (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy,
         dIArr_Jy, dQArr_Jy, dUArr_Jy) = \
         np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
        print("... success.")
    except Exception:
        print("...failed.")
        # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy
        try:
            print("> Trying [freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy]", end=' ')
            (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = \
                         np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
            print("... success.")
            noStokesI = True
        except Exception:
            print("...failed.")
            if debug:
                print(traceback.format_exc())
            sys.exit()
            sys.exit()

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        print("Warn: no Stokes I data in use.")
        IArr_Jy = np.ones_like(QArr_Jy)
        dIArr_Jy = np.zeros_like(QArr_Jy)

    # Convert to GHz and mJy for convenience
    print("Successfully read in the Stokes spectra.")
    freqArr_GHz = freqArr_Hz / 1e9
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    IArr_mJy = IArr_Jy * 1e3
    QArr_mJy = QArr_Jy * 1e3
    UArr_mJy = UArr_Jy * 1e3
    dIArr_mJy = dIArr_Jy * 1e3
    dQArr_mJy = dQArr_Jy * 1e3
    dUArr_mJy = dUArr_Jy * 1e3

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, IfitDict = \
             create_frac_spectra(freqArr=freqArr_GHz,
                                 IArr=IArr_mJy,
                                 QArr=QArr_mJy,
                                 UArr=UArr_mJy,
                                 dIArr=dIArr_mJy,
                                 dQArr=dQArr_mJy,
                                 dUArr=dUArr_mJy,
                                 polyOrd=polyOrd,
                                 verbose=True)

    # Plot the data and the Stokes I model fit
    if showPlots:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr_mJy = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr_mJy=IArr_mJy,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr_mJy=dIArr_mJy,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr_mJy=IModHirArr_mJy,
                              fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        specFig.canvas.draw()
        specFig.show()

    #-------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    print("\nLoading the model from file 'models_mc/m%d.py' ..." % modelNum)
    # Note: the 'imp' module is deprecated in Python 3; importlib is the modern replacement
    mod = imp.load_source("m%d" % modelNum, "models_mc/m%d.py" % modelNum)
    global model
    model = mod.model

    # Select the inputs to the chosen model by creating an instance of
    # inParmClass. Seed walker vectors based on the preset seed-range.
    ip = inParmClass(mod.inParms, mod.runParmDict)
    p0 = ip.seed_walkers(nWalkers)

    # Call the lnlike_total function to test it works OK
    print("> Calling ln(likelihood) as a test: L = ", end=' ')
    L = lnlike_total(p0[0], ip, lamSqArr_m2, qArr, dqArr, uArr, duArr)
    print(L)
    if np.isnan(L):
        print("> Err: ln(likelihood) function returned NaN.")
        sys.exit()

    # Define an MCMC sampler object. 3rd argument is ln(likelihood) function
    # and 4th is a list of additional arguments to lnlike() after walker.
    sampler = emcee.EnsembleSampler(
        nWalkers,
        ip.nDim,
        lnlike_total,
        args=[ip, lamSqArr_m2, qArr, dqArr, uArr, duArr],
        threads=nThreads)

    # Initialise the trace figure
    if showPlots:
        chainFigLst = []
        for i in range(ip.nDim):
            chainFigLst.append(plt.figure(figsize=(8, 8)))

    # Run the sampler to explore parameter space
    print('Explore parameter space for %d steps ...' % ip.nExploreSteps,
          end=' ')
    sys.stdout.flush()
    pos, prob, state = sampler.run_mcmc(p0, ip.nExploreSteps)
    print('done.')

    # Reset the samplers to a small range around the max(likelihood)
    maxPos = pos[np.argmax(prob, 0)]
    pos = [maxPos + 1e-9 * np.random.rand(ip.nDim) for i in range(nWalkers)]

    # Plot the chains for the exploration step
    if showPlots:
        print('Plotting the walker chains for the wide exploration step.')
        titleStr = "Exploring all likely parameter space."
        plot_trace(sampler, ip.inParms, title=titleStr)
    sampler.reset()

    # Initialise the structure for holding the binned statistics
    # List of (list of dictionaries)
    statLst = []
    for i in range(ip.nDim):
        statLst.append({
            "stepBin": [],
            "medBin": [],
            "stdBin": [],
            "medAll": [],
            "stdAll": [],
            "B": [],
            "W": [],
            "R": [],
            "stat1": [],
            "stat2": []
        })
    likeStatDict = {
        "stepBin": [],
        "medBin": [],
        "stdBin": [],
        "stat1": [],
        "stat2": []
    }

    # Run the sampler, polling the statistics every nPollSteps
    print("Running the sampler and polling every %d steps:" % ip.nPollSteps)
    if ip.runMode == "auto":
        print("> Will attempt to detect MCMC chain stability.")
    print("Maximum steps set to %d." % ip.maxSteps)
    print("")
    while True:
        convergeFlg = False
        convergeFlgLst = []
        print(".", end=" ")
        sys.stdout.flush()

        # Run the sampler for nPollSteps
        pos, prob, state = sampler.run_mcmc(pos, ip.nPollSteps)

        # Perform wrapping if ip.inParms[n]['wrap'] is set.
        sampler, pos = wrap_chains(ip.inParms, sampler, pos, shift=True)

        # Measure the statistics of the binned likelihood
        stepBin = sampler.chain.shape[1] - (ip.nPollSteps / 2.0)
        likeWin = sampler.lnprobability[:, -ip.nPollSteps:]
        likeStatDict["stepBin"].append(stepBin)
        likeStatDict["medBin"].append(np.median(likeWin))
        likeStatDict["stdBin"].append(np.std(likeWin))

        # Measure the statistics of the binned chains
        chainWin = sampler.chain[:, -ip.nPollSteps:, :]
        for i in range(ip.nDim):
            mDict = gelman_rubin(chainWin[:, :, i])
            statLst[i]["stepBin"].append(stepBin)
            statLst[i]["medBin"].append(np.median(chainWin[:, :, i]))
            statLst[i]["stdBin"].append(np.std(chainWin[:, :, i]))
            statLst[i]["medAll"].append(mDict["medAll"])
            statLst[i]["stdAll"].append(mDict["stdAll"])
            statLst[i]["B"].append(mDict["B"])
            statLst[i]["W"].append(mDict["W"])
            statLst[i]["R"].append(mDict["R"])
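            # The Gelman-Rubin statistic R compares the between-chain (B) and
            # within-chain (W) variances; values close to 1 indicate that the
            # walkers have converged on a common distribution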

            # Check for convergence in each parameter trace
            convergeFlg, stat1, stat2 = \
                chk_trace_stable(statDict=statLst[i],
                                 nCycles=ip.nStableCycles,
                                 stdLim=ip.parmStdLim,
                                 medLim=ip.parmMedLim)
            convergeFlgLst.append(convergeFlg)
            statLst[i]["stat1"].append(stat1)
            statLst[i]["stat2"].append(stat2)

        # Check for convergence in the likelihood trace
        convergeFlg, stat1, stat2 = \
            chk_trace_stable(statDict=likeStatDict,
                             nCycles=ip.nStableCycles,
                             stdLim=ip.likeStdLim,
                             medLim=ip.likeMedLim)
        convergeFlgLst.append(convergeFlg)
        likeStatDict["stat1"].append(stat1)
        likeStatDict["stat2"].append(stat2)

        # If all traces have converged, continue
        if ip.runMode == "auto" and np.all(convergeFlgLst):
            print("\n> Stability threshold passed!")
            break

        # Continue at the upper step limit
        if sampler.chain.shape[1] > ip.maxSteps:
            print("\nMaximum number of steps performed.")
            break

    # Plot the likelihood trace and statistics
    if debug:
        plot_like_stats(likeStatDict)
        if not showPlots:
            print("Press <RETURN> ...", end=' ')
            input()

    # Discard the burn-in section of the chain
    print("\nUsing the last %d steps to sample the posterior.\n" % ip.nSteps)
    chainCut = sampler.chain[:, -ip.nSteps:, :]
    s = chainCut.shape
    flatChainCut = chainCut.reshape(s[0] * s[1], s[2])
    lnprobCut = sampler.lnprobability[:, -ip.nSteps:]  # (nWalkers, nSteps) layout
    flatLnprobCut = lnprobCut.flatten()

    # Plot the chains
    if showPlots:
        print('Plotting the walker chains after polling ...')
        plot_trace_stats(sampler,
                         ip.inParms,
                         figLst=chainFigLst,
                         nSteps=ip.nSteps,
                         statLst=statLst)

    # Determine the best-fit values from the 16th, 50th and 84th percentile
    # Marginalizing in MCMC is simple: select the axis of the parameter.
    # Update ip.inParms with the best-fitting values.
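    # Note: the 15.72/84.27 percentiles used below appear to come from
    # erf(1) ~ 0.8427; the standard 1-sigma (68.27%) interval corresponds to
    # the 15.87th and 84.13th percentiles, so these error bars are marginally
    # wider than 1 sigma.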
    pBest = []
    print()
    print('-' * 80)
    print('RESULTS:\n')
    for i in range(len(ip.fxi)):
        fChain = flatChainCut[:, i]
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
        best, errPlus, errMinus = g(np.percentile(fChain, [15.72, 50, 84.27]))
        pBest.append(best)
        ip.inParms[ip.fxi[i]]['value'] = best
        ip.inParms[ip.fxi[i]]['errPlus'] = errPlus
        ip.inParms[ip.fxi[i]]['errMinus'] = errMinus
        print('%s = %.4g (+%3g, -%3g)' % (ip.inParms[ip.fxi[i]]['parname'],
                                          best, errPlus, errMinus))

    # Calculate goodness-of-fit parameters
    nData = 2.0 * len(lamSqArr_m2)
    dof = nData - ip.nDim - 1
    chiSq = chisq_model(ip.inParms, lamSqArr_m2, qArr, dqArr, uArr, duArr)
    chiSqRed = chiSq / dof

    # Calculate the information criteria
    lnLike = lnlike_model(ip.inParms, lamSqArr_m2, qArr, dqArr, uArr, duArr)
    AIC = 2.0 * ip.nDim - 2.0 * lnLike
    # Small-sample corrected AIC: AICc = AIC + 2k(k+1)/(n-k-1)
    AICc = AIC + 2.0 * ip.nDim * (ip.nDim + 1) / (nData - ip.nDim - 1)
    BIC = ip.nDim * np.log(nData) - 2.0 * lnLike
    print()
    print("DOF:", dof)
    print("CHISQ:", chiSq)
    print("CHISQ RED:", chiSqRed)
    print("AIC:", AIC)
    print("AICc:", AICc)
    print("BIC:", BIC)
    print()
    print('-' * 80)

    # Create a save dictionary
    saveObj = {
        "inParms": ip.inParms,
        "flatchain": flatChainCut,
        "flatlnprob": flatLnprobCut,
        "chain": chainCut,
        "lnprob": lnprobCut,
        "convergeFlg": np.all(convergeFlgLst),
        "dof": dof,
        "chiSq": chiSq,
        "chiSqRed": chiSqRed,
        "AIC": AIC,
        "AICc": AICc,
        "BIC": BIC,
        "IfitDict": IfitDict
    }

    # Save the Markov chain and results to a Python Pickle
    outFile = prefixOut + "_MCMC.pkl"
    if os.path.exists(outFile):
        os.remove(outFile)
    fh = open(outFile, "wb")
    pkl.dump(saveObj, fh)
    fh.close()
    print("> Results and MCMC chains saved in pickle file '%s'" % outFile)

    # Plot the results
    if showPlots:
        print("Plotting the best-fitting model.")
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr_mJy = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        quModArr = model(ip.inParms, lamSqHirArr_m2)
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr_mJy=IArr_mJy,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr_mJy=dIArr_mJy,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr_mJy=IModArr_mJy,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              fig=specFig)
        specFig.canvas.draw()
        print("> Press <RETURN> to exit ...", end=' ')
        input()
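As a companion sketch, the pickled results written above can be read back as follows (the file name is a placeholder for prefixOut + "_MCMC.pkl"; the keys match the save dictionary in the listing):

import pickle as pkl

# "source1_MCMC.pkl" stands in for prefixOut + "_MCMC.pkl"
with open("source1_MCMC.pkl", "rb") as fh:
    res = pkl.load(fh)

print(res["chiSqRed"], res["AIC"], res["BIC"])
flatChain = res["flatchain"]   # shape (nWalkers * nSteps, nDim)
bestParms = res["inParms"]     # parameter dictionaries with best-fitting values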
Example no. 6
def run_rmsynth(data,
                polyOrd=3,
                phiMax_radm2=None,
                dPhi_radm2=None,
                nSamples=10.0,
                weightType="variance",
                fitRMSF=False,
                noStokesI=False,
                phiNoise_radm2=1e6,
                nBits=32,
                showPlots=False,
                debug=False,
                verbose=False,
                log=print):
    """
    Read the I, Q & U data and run RM-synthesis.
    """

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy
    try:
        if verbose:
            log("> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]",
                end=' ')
        (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy, dIArr_Jy, dQArr_Jy,
         dUArr_Jy) = data
        if verbose: log("... success.")
    except Exception:
        if verbose: log("...failed.")
        # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy
        try:
            if verbose:
                log("> Trying [freq_Hz, q_Jy, u_Jy,  dq_Jy, du_Jy]", end=' ')
            (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = data
            if verbose: log("... success.")
            noStokesI = True
        except Exception:
            if verbose: log("...failed.")
            if debug:
                log(traceback.format_exc())
            sys.exit()
    if verbose: log("Successfully read in the Stokes spectra.")

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        log("Warn: no Stokes I data in use.")
        IArr_Jy = np.ones_like(QArr_Jy)
        dIArr_Jy = np.zeros_like(QArr_Jy)

    # Convert to GHz and mJy for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    IArr_mJy = IArr_Jy * 1e3
    QArr_mJy = QArr_Jy * 1e3
    UArr_mJy = UArr_Jy * 1e3
    dIArr_mJy = dIArr_Jy * 1e3
    dQArr_mJy = dQArr_Jy * 1e3
    dUArr_mJy = dUArr_Jy * 1e3
    dQUArr_mJy = (dQArr_mJy + dUArr_mJy) / 2.0
    dQUArr_Jy = dQUArr_mJy / 1e3

    # Fit the Stokes I spectrum and create the fractional spectra
    IModArr, qArr, uArr, dqArr, duArr, fitDict = \
             create_frac_spectra(freqArr  = freqArr_GHz,
                                 IArr     = IArr_mJy,
                                 QArr     = QArr_mJy,
                                 UArr     = UArr_mJy,
                                 dIArr    = dIArr_mJy,
                                 dQArr    = dQArr_mJy,
                                 dUArr    = dUArr_mJy,
                                 polyOrd  = polyOrd,
                                 verbose  = True,
                                 debug    = debug)

    # Plot the data and the Stokes I model fit
    if showPlots:
        if verbose: log("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(12.0, 8))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr_mJy=IArr_mJy,
                              qArr=qArr,
                              uArr=uArr,
                              dIArr_mJy=dIArr_mJy,
                              dqArr=dqArr,
                              duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr_mJy=IModHirArr_mJy,
                              fig=specFig)

        # Use the custom navigation toolbar (does not work on Mac OS X)
        #        try:
        #            specFig.canvas.toolbar.pack_forget()
        #            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        #        except Exception:
        #            pass

        # Display the figure
        #        if not plt.isinteractive():
        #            specFig.show()

        # DEBUG (plot the Q, U and average RMS spectrum)
        if debug:
            rmsFig = plt.figure(figsize=(12.0, 8))
            ax = rmsFig.add_subplot(111)
            ax.plot(freqArr_Hz / 1e9,
                    dQUArr_mJy,
                    marker='o',
                    color='k',
                    lw=0.5,
                    label='rms <QU>')
            ax.plot(freqArr_Hz / 1e9,
                    dQArr_mJy,
                    marker='o',
                    color='b',
                    lw=0.5,
                    label='rms Q')
            ax.plot(freqArr_Hz / 1e9,
                    dUArr_mJy,
                    marker='o',
                    color='r',
                    lw=0.5,
                    label='rms U')
            xRange = (np.nanmax(freqArr_Hz) - np.nanmin(freqArr_Hz)) / 1e9
            ax.set_xlim(
                np.min(freqArr_Hz) / 1e9 - xRange * 0.05,
                np.max(freqArr_Hz) / 1e9 + xRange * 0.05)
            ax.set_xlabel('$\\nu$ (GHz)')
            ax.set_ylabel('RMS (mJy bm$^{-1}$)')
            ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
#            rmsFig.show()

    #-------------------------------------------------------------------------#

    # Calculate some wavelength parameters
    lambdaSqArr_m2 = np.power(C / freqArr_Hz, 2.0)
    dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
    lambdaSqRange_m2 = (np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2))
    dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
    dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))

    # Set the Faraday depth range
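    # These follow the usual lambda^2-coverage relations: the RMSF FWHM scales
    # as 2*sqrt(3)/Delta(lambda^2) and the default |phi_max| as
    # sqrt(3)/min(delta lambda^2) (Brentjens & de Bruyn 2005)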
    fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
    if dPhi_radm2 is None:
        dPhi_radm2 = fwhmRMSF_radm2 / nSamples
    if phiMax_radm2 is None:
        phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
        phiMax_radm2 = max(phiMax_radm2, 600.0)  # Force the minimum phiMax

    # Faraday depth sampling. Zero always centred on middle channel
    nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
    startPhi_radm2 = -(nChanRM - 1.0) * dPhi_radm2 / 2.0
    stopPhi_radm2 = +(nChanRM - 1.0) * dPhi_radm2 / 2.0
    phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
    phiArr_radm2 = phiArr_radm2.astype(dtFloat)
    if verbose:
        log("PhiArr = %.2f to %.2f by %.2f (%d chans)." %
            (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM))

    # Calculate the weighting as 1/sigma^2 or all 1s (uniform)
    if weightType == "variance":
        weightArr = 1.0 / np.power(dQUArr_mJy, 2.0)
    else:
        weightType = "uniform"
        weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
    if verbose: log("Weight type is '%s'." % weightType)

    startTime = time.time()

    # Perform RM-synthesis on the spectrum
    dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ=qArr,
                                            dataU=uArr,
                                            lambdaSqArr_m2=lambdaSqArr_m2,
                                            phiArr_radm2=phiArr_radm2,
                                            weightArr=weightArr,
                                            nBits=nBits,
                                            verbose=True,
                                            log=log)

    # Calculate the Rotation Measure Spread Function
    RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
        get_rmsf_planes(lambdaSqArr_m2  = lambdaSqArr_m2,
                        phiArr_radm2    = phiArr_radm2,
                        weightArr       = weightArr,
                        mskArr          = ~np.isfinite(qArr),
                        lam0Sq_m2       = lam0Sq_m2,
                        double          = True,
                        fitRMSF         = fitRMSF,
                        fitRMSFreal     = False,
                        nBits           = nBits,
                        verbose         = True,
                        log             = log)
    fwhmRMSF = float(fwhmRMSFArr)

    # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#

    #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
    #          do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)

    #-------------------------------------------------------------------------#

    endTime = time.time()
    cputime = (endTime - startTime)
    if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)

    # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
    # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy
    freq0_Hz = C / m.sqrt(lam0Sq_m2)
    Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz / 1e9)
    dirtyFDF *= (Ifreq0_mJybm / 1e3)  # FDF is in Jy

    # Calculate the theoretical noise in the FDF: sqrt(sum(w^2 sigma^2)) / sum(w)
    # (the commented-out formula below is only valid for variance weighting)
    #dFDFth_Jybm = np.sqrt(1./np.sum(1./dQUArr_Jy**2.))
    dFDFth_Jybm = np.sqrt(
        np.sum(weightArr**2 * dQUArr_Jy**2) / (np.sum(weightArr))**2)

    # Measure the parameters of the dirty FDF
    # Use the theoretical noise to calculate uncertainties
    mDict = measure_FDF_parms(FDF=dirtyFDF,
                              phiArr=phiArr_radm2,
                              fwhmRMSF=fwhmRMSF,
                              dFDF=dFDFth_Jybm,
                              lamSqArr_m2=lambdaSqArr_m2,
                              lam0Sq=lam0Sq_m2)
    mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm)
    mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
    mDict["IfitStat"] = fitDict["fitStatus"]
    mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
    mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
    mDict["freq0_Hz"] = toscalar(freq0_Hz)
    mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
    mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy))
    mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm)
    if mDict['phiPeakPIfit_rm2'] is None:
        log('Peak is at the edge of the RM spectrum! Peak fitting failed!\n')
        log('Rerunning with phiMax twice as large.')
        # The following code re-runs everything with a higher phiMax, then
        # overwrites the appropriate variables so processing can continue
        # without interruption.
        mDict, aDict = run_rmsynth(data=data,
                                   polyOrd=polyOrd,
                                   phiMax_radm2=phiMax_radm2 * 2,
                                   dPhi_radm2=dPhi_radm2,
                                   nSamples=nSamples,
                                   weightType=weightType,
                                   fitRMSF=fitRMSF,
                                   noStokesI=noStokesI,
                                   nBits=nBits,
                                   showPlots=False,
                                   debug=debug,
                                   verbose=verbose)
        phiArr_radm2 = aDict["phiArr_radm2"]
        phi2Arr_radm2 = aDict["phi2Arr_radm2"]
        RMSFArr = aDict["RMSFArr"]
        freqArr_Hz = aDict["freqArr_Hz"]
        weightArr = aDict["weightArr"]
        dirtyFDF = aDict["dirtyFDF"]

    # Measure the complexity of the q and u spectra
    mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"] / (Ifreq0_mJybm / 1e3)
    mD, pD = measure_qu_complexity(freqArr_Hz=freqArr_Hz,
                                   qArr=qArr,
                                   uArr=uArr,
                                   dqArr=dqArr,
                                   duArr=duArr,
                                   fracPol=mDict["fracPol"],
                                   psi0_deg=mDict["polAngle0Fit_deg"],
                                   RM_radm2=mDict["phiPeakPIfit_rm2"])
    mDict.update(mD)

    # Debugging plots for spectral complexity measure
    if debug:
        tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
                                     qArr=pD["yArrQ"],
                                     dqArr=pD["dyArrQ"],
                                     sigmaAddqArr=pD["sigmaAddArrQ"],
                                     chiSqRedqArr=pD["chiSqRedArrQ"],
                                     probqArr=pD["probArrQ"],
                                     uArr=pD["yArrU"],
                                     duArr=pD["dyArrU"],
                                     sigmaAdduArr=pD["sigmaAddArrU"],
                                     chiSqReduArr=pD["chiSqRedArrU"],
                                     probuArr=pD["probArrU"],
                                     mDict=mDict)
        tmpFig.show()

    # Add the array dictionary
    aDict = dict()
    aDict["phiArr_radm2"] = phiArr_radm2
    aDict["phi2Arr_radm2"] = phi2Arr_radm2
    aDict["RMSFArr"] = RMSFArr
    aDict["freqArr_Hz"] = freqArr_Hz
    aDict["weightArr"] = weightArr
    aDict["dirtyFDF"] = dirtyFDF

    if verbose:
        # Print the results to the screen
        log()
        log('-' * 80)
        log('RESULTS:\n')
        log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))

        log('Pol Angle = %.4g (+/-%.4g) deg' %
            (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]))
        log('Pol Angle 0 = %.4g (+/-%.4g) deg' %
            (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]))
        log('Peak FD = %.4g (+/-%.4g) rad/m^2' %
            (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]))
        log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"] / 1e9))
        log('I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"]))
        log('Peak PI = %.4g (+/-%.4g) mJy/beam' %
            (mDict["ampPeakPIfit_Jybm"] * 1e3,
             mDict["dAmpPeakPIfit_Jybm"] * 1e3))
        log('QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"] * 1e3))
        log('FDF Noise (theory)   = %.4g mJy/beam' %
            (mDict["dFDFth_Jybm"] * 1e3))
        log('FDF Noise (Corrected MAD) = %.4g mJy/beam' %
            (mDict["dFDFcorMAD_Jybm"] * 1e3))
        log('FDF Noise (rms)   = %.4g mJy/beam' %
            (mDict["dFDFrms_Jybm"] * 1e3))
        log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
        log('sigma_add(q) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"],
             mDict["dSigmaAddMinusQ"]))
        log('sigma_add(u) = %.4g (+%.4g, -%.4g)' %
            (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"],
             mDict["dSigmaAddMinusU"]))
        log()
        log('-' * 80)

    # Plot the RM Spread Function and dirty FDF
    if showPlots:
        fdfFig = plt.figure(figsize=(12.0, 8))
        plot_rmsf_fdf_fig(phiArr=phiArr_radm2,
                          FDF=dirtyFDF,
                          phi2Arr=phi2Arr_radm2,
                          RMSFArr=RMSFArr,
                          fwhmRMSF=fwhmRMSF,
                          vLine=mDict["phiPeakPIfit_rm2"],
                          fig=fdfFig)

        # Use the custom navigation toolbar
#        try:
#            fdfFig.canvas.toolbar.pack_forget()
#            CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
#        except Exception:
#            pass

    # Display the figure
    #        fdfFig.show()

    # Pause if plotting enabled
    if showPlots or debug:
        plt.show()
        #        #if verbose: print "Press <RETURN> to exit ...",


#        input()

    return mDict, aDict
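A hedged usage sketch for run_rmsynth (the input file name is a placeholder; the data list must follow one of the two column layouts parsed at the top of the function):

import numpy as np

# Placeholder file with columns freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy
data = np.loadtxt("source1.dat", unpack=True, dtype="float32")

mDict, aDict = run_rmsynth(data,
                           polyOrd=2,
                           nSamples=10.0,
                           weightType="variance",
                           fitRMSF=True,
                           verbose=True)

print("Peak Faraday depth: %.2f +/- %.2f rad/m^2" %
      (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]))
dirtyFDF = aDict["dirtyFDF"]           # complex Faraday dispersion function
phiArr_radm2 = aDict["phiArr_radm2"]   # Faraday depth axis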
Example no. 7
def run_qufit(
    data,
    modelNum,
    polyOrd=2,
    nBits=32,
    noStokesI=False,
    showPlots=False,
    debug=False,
    verbose=False,
    sampler="dynesty",
    fit_function="log",
    ncores=1,
    nlive=1000,
    prefixOut="prefixOut",
):
    """Carry out QU-fitting using the supplied parameters:
        data (list): Contains frequency and polarization data as either:
            [freq_Hz, I, Q, U, dI, dQ, dU]
                freq_Hz (array_like): Frequency of each channel in Hz.
                I (array_like): Stokes I intensity in each channel.
                Q (array_like): Stokes Q intensity in each channel.
                U (array_like): Stokes U intensity in each channel.
                dI (array_like): Error in Stokes I intensity in each channel.
                dQ (array_like): Error in Stokes Q intensity in each channel.
                dU (array_like): Error in Stokes U intensity in each channel.
            or
            [freq_Hz, q, u,  dq, du]
                freq_Hz (array_like): Frequency of each channel in Hz.
                q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
                u (array_like): Fractional Stokes U intensity (U/I) in each channel.
                dq (array_like): Error in fractional Stokes Q intensity in each channel.
                du (array_like): Error in fractional Stokes U intensity in each channel.
        modelNum (int, required): number of the model to be fit to the data.
            Models and priors are specified as Python code in files called
            'mX.py' within the 'models_ns' directory.
        polyOrd (int): Order of polynomial to fit to the Stokes I spectrum
            (used to normalize the Q and U values). Defaults to 2 (quadratic).
        nBits (int): number of bits to use in internal calculations.
        noStokesI (bool): set True if the Stokes I spectrum should be ignored.
        showPlots (bool): Set True if the spectrum and parameter-space plots
            should be displayed.
        debug (bool): Display debug messages.
        verbose (bool): Print verbose messages/results to terminal.
        sampler (str): name of the sampling method passed to bilby.
            Defaults to "dynesty".
        fit_function (str): functional form of the Stokes I fit passed to
            create_frac_spectra (default "log").
        ncores (int): number of processes passed to bilby as 'npool'.
        nlive (int): number of live points used by the nested sampler.
        prefixOut (str): prefix used to name all output files.

        Returns: nothing. Results saved to files and/or printed to terminal."""
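    # Example call (illustrative only; the file name is a placeholder):
    #   data = np.loadtxt("source1.dat", unpack=True)
    #   run_qufit(data, modelNum=1, polyOrd=2, sampler="dynesty",
    #             nlive=1000, prefixOut="source1")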

    # Output prefix is derived from the input file name
    nestOut = f"{prefixOut}_m{modelNum}_{sampler}/"

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
        print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
            print("\nFormat [freq_Hz, Q, U,  dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert frequency to wavelength-squared [m^2]
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    dataArr = create_frac_spectra(
        freqArr=freqArr_Hz,
        IArr=IArr,
        QArr=QArr,
        UArr=UArr,
        dIArr=dIArr,
        dQArr=dQArr,
        dUArr=dUArr,
        polyOrd=polyOrd,
        verbose=True,
        fit_function=fit_function,
    )
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    print("Plotting the input data and spectral index fit.")
    freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
    IModHirArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
    specFig = plt.figure(facecolor="w", figsize=(10, 6))
    plot_Ipqu_spectra_fig(
        freqArr_Hz=freqArr_Hz,
        IArr=IArr,
        qArr=qArr,
        uArr=uArr,
        dIArr=dIArr,
        dqArr=dqArr,
        duArr=duArr,
        freqHirArr_Hz=freqHirArr_Hz,
        IModArr=IModHirArr,
        fig=specFig,
    )

    # Use the custom navigation toolbar
    try:
        specFig.canvas.toolbar.pack_forget()
        CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
    except Exception:
        pass

    # Display the figure
    if showPlots:
        specFig.canvas.draw()
        specFig.show()

    # -------------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    mod = load_model(modelNum, verbose=True)

    model = mod.model


    parNames = []
    priorTypes = []
    labels = []
    bounds = []
    wraps = []
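    # Collect parameter metadata from the bilby prior objects: Constraint
    # priors are skipped, and DeltaFunction priors are treated as fixed
    # parameters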
    for key, prior in mod.priors.items():
        if prior.__class__.__name__ == "Constraint":
            continue
        parNames.append(key)
        priorTypes.append(prior.__class__.__name__)
        labels.append(prior.latex_label)
        bounds.append([prior.minimum, prior.maximum])
        wraps.append(prior.boundary)
    nDim = len(parNames)
    fixedMsk = [0 if x == "DeltaFunction" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    priors = mod.priors

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr,
                         modelNum)
    # Let's time the sampler
    startTime = time.time()

    result = bilby.run_sampler(
        likelihood=lnlike,
        priors=priors,
        sampler=sampler,
        nlive=nlive,
        npool=ncores,
        outdir=nestOut,
        label="m%d" % modelNum,
        plot=True,
    )

    # Do the post-processing on one processor
    endTime = time.time()
    # Best estimate of ln(L): take the maximum over all likelihood evaluations
    lnLike = np.max(result.log_likelihood_evaluations)
    lnEvidence = result.log_evidence
    dLnEvidence = result.log_evidence_err

    # Get the best-fitting values & uncertainties

    p = [None] * nDim
    errPlus = [None] * nDim
    errMinus = [None] * nDim
    # g = lambda v: (v[1], v[2]-v[1], v[1]-v[0])
    for i in range(nDim):
        summary = result.get_one_dimensional_median_and_error_bar(parNames[i])
        # Median and asymmetric error bars from the 1D marginal posterior
        p[i], errPlus[i], errMinus[i] = (
            summary.median,
            summary.plus,
            summary.minus,
        )

    # Calculate goodness-of-fit parameters
    nData = 2.0 * len(lamSqArr_m2)
    dof = nData - nFree - 1
    chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr,
                        model)
    chiSqRed = chiSq / dof
    AIC = 2.0 * nFree - 2.0 * lnLike
    # Small-sample corrected AIC: AICc = AIC + 2k(k+1)/(n-k-1)
    AICc = AIC + 2.0 * nFree * (nFree + 1) / (nData - nFree - 1)
    BIC = nFree * np.log(nData) - 2.0 * lnLike
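    # Standard definitions: AIC = 2k - 2ln(L) and BIC = k*ln(n) - 2ln(L),
    # with k = nFree free parameters and n = nData (Q and U points combined)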

    # Summary of run
    print("")
    print("-" * 80)
    print("SUMMARY OF SAMPLING RUN:")
    print("#-PROCESSORS  = %d" % ncores)
    print("RUN-TIME      = %.2f" % (endTime - startTime))
    print("DOF           = %d" % dof)
    print("CHISQ:        = %.3g" % chiSq)
    print("CHISQ RED     = %.3g" % chiSqRed)
    print("AIC:          = %.3g" % AIC)
    print("AICc          = %.3g" % AICc)
    print("BIC           = %.3g" % BIC)
    print("ln(EVIDENCE)  = %.3g" % lnEvidence)
    print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
    print("")
    print("-" * 80)
    print("RESULTS:\n")
    for i in range(len(p)):
        print("%s = %.4g (+%3g, -%3g)" %
              (parNames[i], p[i], errPlus[i], errMinus[i]))
    print("-" * 80)
    print("")

    # Create a save dictionary and store final p in values
    outFile = f"{prefixOut}_m{modelNum}_{sampler}.json"
    IfitDict["p"] = toscalar(IfitDict["p"].tolist())
    saveDict = {
        "parNames": toscalar(parNames),
        "labels": toscalar(labels),
        "values": toscalar(p),
        "errPlus": toscalar(errPlus),
        "errMinus": toscalar(errMinus),
        "bounds": toscalar(bounds),
        "priorTypes": toscalar(priorTypes),
        "wraps": toscalar(wraps),
        "dof": toscalar(dof),
        "chiSq": toscalar(chiSq),
        "chiSqRed": toscalar(chiSqRed),
        "AIC": toscalar(AIC),
        "AICc": toscalar(AICc),
        "BIC": toscalar(BIC),
        "ln(EVIDENCE) ": toscalar(lnEvidence),
        "dLn(EVIDENCE)": toscalar(dLnEvidence),
        "nFree": toscalar(nFree),
        "Imodel": toscalar(",".join([str(x) for x in IfitDict["p"]])),
        "Imodel_errs":
        toscalar(",".join([str(x) for x in IfitDict["perror"]])),
        "IfitChiSq": toscalar(IfitDict["chiSq"]),
        "IfitChiSqRed": toscalar(IfitDict["chiSqRed"]),
        "IfitPolyOrd": toscalar(IfitDict["polyOrd"]),
        "Ifitfreq0": toscalar(IfitDict["reference_frequency_Hz"]),
    }
    with open(outFile, "w") as FH:
        json.dump(saveDict, FH)
    outFile = f"{prefixOut}_m{modelNum}_{sampler}.dat"
    with open(outFile, "w") as FH:
        for k, v in saveDict.items():
            FH.write("%s=%s\n" % (k, v))
    print("Results saved in JSON and .dat format to:\n '%s'\n" % outFile)

    # Plot the posterior samples in a corner plot
    # chains =  aObj.get_equal_weighted_posterior()
    # chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
    # iFixed = [i for i, e in enumerate(fixedMsk) if e==0]
    # chains = np.delete(chains, iFixed, 1)
    # for i in sorted(iFixed, reverse=True):
    #     del(labels[i])
    #     del(p[i])

    cornerFig = result.plot_corner()

    # Save the posterior chains to ASCII file

    # Plot the data and best-fitting model
    lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
    freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
    IModArr = calculate_StokesI_model(IfitDict, freqHirArr_Hz)
    pDict = {k: v for k, v in zip(parNames, p)}
    quModArr = model(pDict, lamSqHirArr_m2)
    model_dict = {
        "model": model,
        "parNames": parNames,
        "posterior": result.posterior,
    }
    specFig.clf()
    plot_Ipqu_spectra_fig(
        freqArr_Hz=freqArr_Hz,
        IArr=IArr,
        qArr=qArr,
        uArr=uArr,
        dIArr=dIArr,
        dqArr=dqArr,
        duArr=duArr,
        freqHirArr_Hz=freqHirArr_Hz,
        IModArr=IModArr,
        qModArr=quModArr.real,
        uModArr=quModArr.imag,
        model_dict=model_dict,
        fig=specFig,
    )
    specFig.canvas.draw()

    # Save the figures
    outFile = prefixOut + "fig_m%d_specfit.pdf" % modelNum
    specFig.set_canvas(specFig.canvas)
    specFig.figure.savefig(outFile)
    print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
    outFile = prefixOut + "fig_m%d_corner.pdf" % modelNum
    cornerFig.set_canvas(cornerFig.canvas)
    cornerFig.savefig(outFile)
    print("Plot of posterior samples saved to \n '%s'\n" % outFile)

    # Display the figures
    if showPlots:
        specFig.figure.show()
        cornerFig.show()