Code example #1
File: plot_widths.py  Project: rferdman/pypsr
from sys import argv

import matplotlib.pyplot as plt

# read_widths and plot_widths are provided elsewhere in the pypsr project.


def main():
    # Input print_resids-format data files are read from the command line.
    # (Command-line options to come later.)
    width_file = argv[1:]

    width_data = []
    for wfile in width_file:
        width_data.append(read_widths(wfile, units_in="phase", units_out="phase"))

    plot_widths(width_data, yunits="deg", canvassize=(11, 6), ticklabelsize=22, axislabelsize=22)

    plot_file = "widths.eps"
    plt.savefig(plot_file)
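The examples below all index the reader's result as w['mjd'], w['width'] and w['werr'], so read_widths evidently returns a dict of NumPy arrays under those keys. For experimenting with these plotting scripts without the real reader, a hypothetical minimal stand-in (read_widths_stub is not part of pypsr; it assumes a plain three-column text file) might look like:

import numpy as np

def read_widths_stub(wfile, units_in="phase", units_out="phase"):
    # Hypothetical stand-in for the project's read_widths: parse MJD,
    # width and width-error columns from a plain text file.  The unit
    # keywords are accepted but ignored here.
    mjd, width, werr = np.loadtxt(wfile, dtype='double', comments='#',
                                  usecols=(0, 1, 2), unpack=True)
    return {'mjd': mjd, 'width': width, 'werr': werr}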
Code example #2
File: read_width.py  Project: rferdman/pypsr
import numpy as np

# read_widths is provided elsewhere in the pypsr project.


def read_width_rad(data_file, rfile=None):
    # Use the project's standard width reader -- returns widths in
    # units of pulse phase.  If a reference width file (same format) is
    # given via rfile, it is passed through so the widths can be
    # adjusted by subtracting the difference of the weighted means of
    # the two data sets.
    w = read_widths(data_file, rfile=rfile)

    # Convert widths from units of pulse phase to radians...
    y = 2.*np.pi*w['width']

    # ...and convert the width errors the same way.
    y_err = 2.*np.pi*w['werr']

    return w['mjd'], y, y_err
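As a quick sanity check of the conversion (one full turn of pulse phase is 2*pi radians), a width of 0.05 in phase maps to about 0.314 rad:

>>> import numpy as np
>>> 2.*np.pi*0.05   # width of 5% of the pulse period, in radians
0.3141592653589793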
Code example #3
File: read_width.py  Project: rferdman/pypsr
def read_width_cos_phi(data_file, rfile=None):
    # Use the project's standard width reader -- returns widths in
    # units of pulse phase.  As above, an optional reference width file
    # (rfile, same format) is passed through for the weighted-mean
    # adjustment.
    w = read_widths(data_file, rfile=rfile)

    # Convert widths to cos(phi0) = cos(half-width), where phi0 is in
    # radians.  First convert from units of pulse phase to radians
    # (factor of 2*pi), then divide by 2 to get half-widths, and take
    # the cosine:
    phi = 2.*np.pi*w['width']/2.  # kept in this form for clarity
    cos_phi = np.cos(phi)

    # Find the corresponding errors.  First convert to half-width
    # errors, in radians...
    phi_err = 2.*np.pi*w['werr']/2.

    # ...then use a simple error estimate for cos(phi0): average the
    # offsets of cos(phi +/- phi_err) from cos(phi) itself.
    cos_max = np.cos(phi + phi_err)
    cos_min = np.cos(phi - phi_err)

    cos_phi_err = np.zeros_like(cos_phi)
    for i_cos in np.arange(len(cos_phi_err)):
        cos_phi_err[i_cos] = np.mean([np.abs(cos_max[i_cos] - cos_phi[i_cos]),
                                      np.abs(cos_min[i_cos] - cos_phi[i_cos])])

    return w['mjd'], cos_phi, cos_phi_err
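The element-wise loop at the end is equivalent to a single vectorized expression, since the mean of two values is half their sum; a drop-in sketch:

# Vectorized equivalent of the cos_phi_err loop above:
cos_phi_err = 0.5*(np.abs(cos_max - cos_phi) + np.abs(cos_min - cos_phi))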
Code example #4
File: plot_widths_multi.py  Project: rferdman/pypsr
import glob
from sys import argv

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter, MaxNLocator

# read_widths and the mjd module come from elsewhere in the pypsr project.


def main():
    input_files = argv[1:]

    # Parse the command line to get all width files we wish to plot.
    # Slice assignment extends the list in place; .append() would nest
    # each glob result as a single list element:
    width_file = []
    for file_name in input_files:
        width_file[len(width_file):] = glob.glob(file_name)

    n_subplots = len(width_file)
    print "N_SUBPLOTS = ", n_subplots

    # Set up the plot:
    fig = plt.figure()

    fig_top = 0.95
    fig_bottom = 0.13
    fig_left = 0.13
    fig_right = 0.95

    for i_w in np.arange(n_subplots):
        print "I_W = ", i_w
        width_data = read_widths(width_file[i_w])

        # Plot width in degrees vs time in years for each subplot:
        width_data['year'] = [mjd.mjdtoyear(m) for m in width_data['mjd']]
        width_data['width'] *= 360.
        width_data['werr'] *= 360.

        # Set up the plot limits:
        xmin = np.amin(width_data['year']) - 0.25
        xmax = np.amax(width_data['year']) + 0.25
        ymin = np.amin(width_data['width'] - width_data['werr'])
        ymax = np.amax(width_data['width'] + width_data['werr'])
        xspan = abs(xmax - xmin)
        yspan = abs(ymax - ymin)

        # Keep track of the max yspan so all plots share the same scale:
        if i_w == 0:
            max_yspan = yspan
        elif yspan > max_yspan:
            max_yspan = yspan

        ax = fig.add_axes([fig_left,
                           fig_bottom + (fig_top-fig_bottom)*(float(n_subplots-(i_w+1))/float(n_subplots)),
                           fig_right - fig_left,
                           (fig_top-fig_bottom)*(1./float(n_subplots))])
        ax.set_xlim(xmin, xmax)
        # Set y limits so that all plots have the same scale:
        ax.set_ylim(0.5*(ymin+ymax) - 0.5*max_yspan,
                    0.5*(ymin+ymax) + 0.5*max_yspan)
        if i_w < n_subplots-1:
            ax.xaxis.set_ticklabels([])
        else:
            xMajorFormatter = FormatStrFormatter('%d')
            ax.xaxis.set_major_formatter(xMajorFormatter)
        yMajorFormatter = FormatStrFormatter('%.1f')
        ax.yaxis.set_major_formatter(yMajorFormatter)
        ax.yaxis.set_major_locator(MaxNLocator(5, prune='both'))

        # Now plot the widths ('none' draws error bars with no markers):
        ax.plot(width_data['year'], width_data['width'], 'o')
        ax.errorbar(width_data['year'], width_data['width'],
                    width_data['werr'], fmt='none')

    # Finally, put axis labels on the entire figure:
    fig.text(0.5*(fig_left+fig_right), 0.06, 'Year', fontsize=16,
             ha='center', va='center')
    fig.text(0.04, 0.5*(fig_top+fig_bottom), 'Pulse width (deg)', fontsize=16,
             ha='center', va='center', rotation='vertical')

    plot_file = 'widths_multi.png'
    plt.savefig(plot_file)
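The add_axes arithmetic above hand-builds a column of touching panels, each (fig_top-fig_bottom)/n_subplots of the figure tall. For comparison, matplotlib's own subplot machinery produces the same stacked layout; an alternative sketch with placeholder data, not the author's code:

import numpy as np
import matplotlib.pyplot as plt

# Stacked, touching panels via plt.subplots: sharex=True hides the x
# tick labels on all but the bottom panel, hspace=0 makes them touch.
n_subplots = 3
fig, axes = plt.subplots(n_subplots, 1, sharex=True)
fig.subplots_adjust(top=0.95, bottom=0.13, left=0.13, right=0.95, hspace=0.)
for ax in axes:
    ax.plot(np.arange(10), np.random.rand(10), 'o')  # placeholder data
plt.savefig('widths_multi_alt.png')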
Code example #5
import glob

import numpy as np
import astropy.units as u  # assumed: u.day.to(u.year, ...) below matches astropy.units
import matplotlib.pyplot as plt
from matplotlib import cm

# get_opt, read_widths, profile_shape_fit, plot_pdf, plot_contour_pdf,
# get_pdf_prob, mjdtoyear and yeartomjd come from elsewhere in the
# pypsr project.


def main():

    progname = "run_profile_shape_fit.py"
    args = get_opt(progname)

    input_files = []
    for infile in args.wfiles:
        # Slice assignment extends the list in place; .append() would
        # nest each glob result as a single list element:
        input_files[len(input_files):] = glob.glob(infile)

    n_alpha = np.sum(args.alpha_sampling)
    n_delta = np.sum(args.delta_sampling)
    n_T1 = np.sum(args.T1_sampling)

    print "Pulsar parameters:"
    print "   Pulsar:  " + args.psr_name
    print "   Precession period:  " + str(u.day.to(u.year, args.prec_period)) + " years"
    print "   Inclination:  " + str(np.degrees(args.incl)) + " deg"
    print "Fitting parameters:"
    print "   Alpha: " + str(n_alpha) + " points spanning the ranges " + str(
        np.degrees(args.alpha_ranges)
    ) + " with corresponding sampling " + str(args.alpha_sampling)
    print "   Delta: " + str(n_delta) + " points spanning the ranges " + str(
        np.degrees(args.delta_ranges)
    ) + " with corresponding sampling " + str(args.delta_sampling)
    print "   T1:    " + str(n_T1) + " points spanning the ranges " + str(
        args.T1_ranges
    ) + " with corresponding sampling " + str(args.T1_sampling) + "\n"

    # We will have a list of dictionaries, each representing a given input file.
    wdata = []
    # Run through the (globbed) data files and create the list of
    # width-data dictionaries:
    for wfile in input_files:
        print "Reading in width data file " + wfile
        # Read in data from each file and append cos_phi values:
        wdata.append(read_widths(wfile, units_in="phase", units_out="cos_phi"))

    # Set up the data file names -- leave off '.npy', since np.save
    # appends it automatically while np.load requires it explicitly:
    grid_param_file = args.data_filebase + "_param"
    grid_prob_file = args.data_filebase + "_prob"

    ##### Set up data for fit #######
    mjd = []
    y = []
    y_err = []
    for w in wdata:
        mjd.append(w["mjd"])
        y.append(w["width"])
        y_err.append(w["werr"])
    mjd = np.array(mjd)
    y = np.array(y)
    y_err = np.array(y_err)
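    # (Note: if the input files have different numbers of points, these
    # become object arrays of per-file arrays rather than 2-d arrays.)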

    # If not loading data in, then do the grid fit:
    if not args.load_results:

        # Pack the input variables into one easy-to-read dictionary:
        input_data = {"mjd": mjd, "y": y, "y_err": y_err, "incl": args.incl, "prec_period": args.prec_period}
        input_params = {
            "rho_lim": args.rho_lim,
            "rho_guess": args.rho_guess,
            "alpha_ranges": args.alpha_ranges,
            "delta_ranges": args.delta_ranges,
            "T1_ranges": args.T1_ranges,
            "alpha_sampling": args.alpha_sampling,
            "delta_sampling": args.delta_sampling,
            "T1_sampling": args.T1_sampling,
        }

        ##### Do fit: #####
        p_out = profile_shape_fit(input_params, input_data)

        # Expand the output dictionary:
        norm_like = p_out["norm_like"]
        norm_vol = p_out["norm_vol"]
        alpha = p_out["alpha"]
        delta = p_out["delta"]
        T1 = p_out["T1"]
        rho = p_out["rho"]
        rho_prob = p_out["rho_prob"]

        # If saving results to file for later use, save the pdf array
        # separately from the parameter values:
        if args.save_results:
            prob_array = np.array([norm_like, norm_vol])
            np.save(grid_prob_file, prob_array)
            param_array = np.array([alpha, delta, T1, rho, rho_prob])
            np.save(grid_param_file, param_array)
            print "norm_like shape  = ", norm_like.shape
            print "norm_vol shape  = ", norm_vol.shape
            print "param_array shape = ", param_array.shape

    # Otherwise load the data (the file names defined above, not the
    # undefined *_rl variants the original referenced):
    else:
        prob_array = np.load(grid_prob_file + ".npy")
        norm_like = prob_array[0]
        norm_vol = prob_array[1]
        print "Loading data from {0:s}.".format(grid_prob_file + ".npy")
        param_array = np.load(grid_param_file + ".npy")
        print "Loading data from {0:s}.".format(grid_param_file + ".npy")
        alpha = param_array[0]
        delta = param_array[1]
        T1 = param_array[2]
        rho = param_array[3]
        rho_prob = param_array[4]

    # Redefine n_alpha etc. from the loaded arrays, since the file
    # contents may differ from the input parameters:
    n_alpha = len(alpha)
    n_delta = len(delta)
    n_T1 = len(T1)
    n_dims = (n_alpha, n_delta, n_T1)

    print "n_dims = ", n_dims
    # print "alpha = ", np.degrees(alpha)
    # print "delta = ", np.degrees(delta)
    # print "T1 = ", T1

    # Collapse the 3-d likelihood grid into three 2-d grids
    # (alpha/delta, alpha/T1, delta/T1).  The likelihood returned by the
    # grid fit is normalized, so summing along axis i_axis satisfies
    #     np.sum(norm_like * weight[i_axis], axis=i_axis) = 1
    # Sum along each direction in turn, giving a 2-d array for each pair
    # of parameters.
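    # For instance, with norm_vol of shape (n_alpha, n_delta, n_T1),
    # np.sum(norm_vol, axis=0) marginalizes over alpha and leaves an
    # (n_delta, n_T1) joint probability array, as done below.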

    # Convert T1 from MJD to years:
    T1_year = np.array([mjdtoyear(t) for t in T1])

    # Get the delta/T1 array:
    prob_delta_T1 = np.sum(norm_vol, axis=0)
    plot_contour_pdf(
        np.degrees(delta),
        T1_year,
        np.transpose(prob_delta_T1),
        xlabel="$\\delta$ (degrees)",
        ylabel="$T_1$ (year)",
    )
    plt.savefig("contour_delta_T1_rl.png")

    # Get the alpha/T1 array:
    prob_alpha_T1 = np.sum(norm_vol, axis=1)
    plot_contour_pdf(
        T1_year,
        np.degrees(alpha),
        prob_alpha_T1,
        xlabel="$T_1$ (year)",
        ylabel="$\\alpha$ (degrees)",
    )
    plt.savefig("contour_alpha_T1_rl.png")

    # Get the alpha/delta array:
    prob_alpha_delta = np.sum(norm_vol, axis=2)
    plot_contour_pdf(
        np.degrees(delta),
        np.degrees(alpha),
        prob_alpha_delta,
        xlabel="$\\delta$ (degrees)",
        ylabel="$\\alpha$ (degrees)",
    )
    plt.savefig("contour_alpha_delta_rl.png")

    # Collapse the 3-d grid into three 1-d histograms to get the
    # posteriors for each of alpha, delta, and T1.

    # Finally, get individual PDFs by summing along one more axis in the above arrays
    # (NB there will thus be two ways to get each PDF, but it doesn't matter which is
    # chosen for each)
    prob_intervals = np.array([0.683, 0.954, 0.9973])
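    # (0.683, 0.954 and 0.9973 are the Gaussian 1-, 2- and 3-sigma
    # coverage fractions, reported below as 68%, 95% and 99% intervals.)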

    # alpha:
    alpha_vol = np.sum(prob_alpha_delta, axis=1)
    # Sum the normalized likelihood first over the T1 and then the delta
    # axes to get the alpha pdf:
    alpha_pdf = norm_like.sum(axis=2).sum(axis=1)
    alpha_med, alpha_prob_min, alpha_prob_max = get_pdf_prob(
        np.degrees(alpha), alpha_vol, prob_intervals)
    plot_pdf(
        np.degrees(alpha),
        alpha_pdf,
        xlabel="$\\alpha$ (degrees)",
        ylabel="Probability density",
        prob_lines=np.append(alpha_prob_min, alpha_prob_max),
        prob_linestyle=["dashed", "dashdot", "dotted", "dashed", "dashdot", "dotted"],
    )
    plt.savefig("pdf_alpha_rl.png")

    print " "
    print "ALPHA = ", alpha_med
    print "  68%: ", alpha_prob_min[0], "  ", alpha_prob_max[0]
    print "  95%: ", alpha_prob_min[1], "  ", alpha_prob_max[1]
    print "  99%: ", alpha_prob_min[2], "  ", alpha_prob_max[2]
    print " "

    # delta:
    delta_vol = np.sum(prob_delta_T1, axis=1)
    # Sum the normalized likelihood over the T1, then the alpha axis to
    # get the delta pdf:
    delta_pdf = norm_like.sum(axis=2).sum(axis=0)
    if np.degrees(np.amax(args.delta_ranges)) > 100.0:
        delta_upper = get_pdf_prob(
            np.degrees(delta[0 : n_delta / 2]),
            delta_vol[0 : n_delta / 2],
            prob_intervals,
            upper=True,
        )
        delta_lower = get_pdf_prob(
            np.degrees(delta[n_delta / 2 : n_delta]),
            delta_vol[n_delta / 2 : n_delta],
            prob_intervals,
            lower=True,
        )

        plot_pdf(
            np.degrees(delta),
            delta_pdf,
            xlabel="$\\delta$ (degrees)",
            ylabel="Probability density",
            prob_lines=np.append(delta_upper, delta_lower),
            prob_linestyle=["dashed", "dashdot", "dotted", "dashed", "dashdot", "dotted"],
        )
        print " "
        print "DELTA: "
        print "  68%: < ", delta_upper[0], " / > ", delta_lower[0]
        print "  95%: < ", delta_upper[1], " / > ", delta_lower[1]
        print "  99%: < ", delta_upper[2], " / > ", delta_lower[2]
        print " "
    else:
        delta_upper = get_pdf_prob(
            np.degrees(delta[0:n_delta]),
            delta_vol[0:n_delta],
            prob_intervals,
            upper=True,
        )

        plot_pdf(
            np.degrees(delta),
            delta_pdf,
            xlabel="$\\delta$ (degrees)",
            ylabel="Probability density",
            prob_lines=delta_upper,
            prob_linestyle=["dashed", "dashdot", "dotted"],
        )
        print " "
        print "DELTA: "
        print "  68%: < ", delta_upper[0]
        print "  95%: < ", delta_upper[1]
        print "  99%: < ", delta_upper[2]
        print " "

    plt.savefig("pdf_delta_rl.png")

    # T1:
    T1_vol = np.sum(prob_alpha_T1, axis=0)
    # Sum the normalized likelihood over the delta, then the alpha axis
    # to get the T1 pdf:
    T1_pdf = norm_like.sum(axis=1).sum(axis=0)
    # Now find the peaks in the T1 distribution, and overplot the data
    # span using boundary lines.  Take the first peak, then find the
    # second after removing peak MJD +/- prec_period/4 (so that
    # prec_period/2 of the T1 range remains):
    T1_peak = np.zeros(2)
    peak_ind = T1_pdf.argmax()
    T1_peak[0] = T1_year[peak_ind]
    # peak is too close to first index:
    if peak_ind - n_T1 / 4 < 0:
        extra_ind = n_T1 / 4 - peak_ind
        temp_T1 = T1_year[peak_ind + n_T1 / 4 : n_T1 - extra_ind]
        temp_pdf = T1_pdf[peak_ind + n_T1 / 4 : n_T1 - extra_ind]
    # peak is too close to last index:
    elif peak_ind + n_T1 / 4 > n_T1:
        extra_ind = (peak_ind + n_T1 / 4) - n_T1
        temp_T1 = T1_year[extra_ind : peak_ind - n_T1 / 4]
        temp_pdf = T1_pdf[extra_ind : peak_ind - n_T1 / 4]
    # peak is at least n_T1/4 from both edges:
    else:
        temp_T1 = np.append(T1_year[peak_ind + n_T1 / 4 : n_T1], T1_year[0 : peak_ind - n_T1 / 4])
        temp_pdf = np.append(T1_pdf[peak_ind + n_T1 / 4 : n_T1], T1_pdf[0 : peak_ind - n_T1 / 4])
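    # (In each branch the surviving indices are those more than n_T1/4
    # away from the first peak, wrapping around the grid edges, so the
    # second argmax below cannot re-find the first peak.)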

    peak_ind = temp_pdf.argmax()
    T1_peak[1] = temp_T1[peak_ind]
    mjd_mid = np.mean([np.amin(mjd), np.amax(mjd)])
    print " "
    print "T1: "
    print "  peak 1: ", T1_peak[0], " = MJD ", yeartomjd(T1_peak[0])
    print "  peak 2: ", T1_peak[1], " = MJD ", yeartomjd(T1_peak[1])
    print "  data range =  ", np.amin(mjd), np.amax(mjd)
    print "  mjd mid = ", mjd_mid

    plot_pdf(
        T1_year,
        T1_pdf,
        xlabel="$T_1$ (year)",
        ylabel="Probability density",
        prob_lines=[mjdtoyear(np.amin(mjd)), mjdtoyear(np.amax(mjd))],
        prob_linestyle="dashed",
    )
    plt.savefig("pdf_T1_rl.png")

    ###### Now deal with rho values #####
    n_rho = len(args.rho_guess)
    for i_rho in np.arange(n_rho):

        # Make a histogram of rho values to see where the majority of
        # the probability lies, weighting each value by its fit
        # probability (rho_prob):
        pdf_rho, bin_edges = np.histogram(
            rho[i_rho], 256, range=(args.rho_lim[0], args.rho_lim[1]),
            density=True, weights=rho_prob
        )
        # np.histogram returns evenly spaced bins here, so the bin
        # centres are just the left edges shifted by half a bin:
        bin_size = bin_edges[1] - bin_edges[0]
        rho_val = bin_edges[:-1] + 0.5 * bin_size
        # Get the PDF intervals and values:
        rho_med, rho_prob_min, rho_prob_max = get_pdf_prob(
            np.degrees(rho_val), pdf_rho, prob_intervals, norm=True)

        print " "
        print "RHO " + str(i_rho) + " = ", rho_med
        print "  68%: ", rho_prob_min[0], "  ", rho_prob_max[0]
        print "  95%: ", rho_prob_min[1], "  ", rho_prob_max[1]
        print "  99%: ", rho_prob_min[2], "  ", rho_prob_max[2]
        print " "

        # Plot the rho PDFs, colouring each curve by its position along
        # the colormap (the fraction should run from 0 to 1, hence the
        # parentheses around n_rho - 1, which the original misplaced):
        if n_rho == 1:
            clr = "black"
        else:
            clr = cm.jet(float(i_rho) / float(n_rho - 1))
        if i_rho == 0:
            min_pdf_rho = np.amin(pdf_rho)
            max_pdf_rho = np.amax(pdf_rho)
            plot_pdf(
                np.degrees(rho_val), pdf_rho, xlabel="$\\rho$ (degrees)", ylabel="Probability density", linecolour=clr
            )
        else:
            min_pdf_rho = np.amin(np.append(min_pdf_rho, np.amin(pdf_rho)))
            max_pdf_rho = np.amax(np.append(max_pdf_rho, np.amax(pdf_rho)))
            plot_pdf(np.degrees(rho_val), pdf_rho, linecolour=clr, overplot=True)
    plt.xlim(np.degrees(np.amin(rho_val)), np.degrees(np.amax(rho_val)))
    plt.ylim(min_pdf_rho, max_pdf_rho)
    plt.savefig("pdf_rho_rl.png")