Example #1
def uncertainties(cube_id):
    # Find the signal-to-noise value for the specific galaxy being considered
    file_loc = "ppxf_results/cube_" + str(int(cube_id))
    ppxf_x = np.load(file_loc + "/cube_" + str(int(cube_id)) + "_x.npy")
    ppxf_y = np.load(file_loc + "/cube_" + str(int(cube_id)) + "_y.npy")

    ppxf_variables = np.load(file_loc + "/cube_" + str(int(cube_id)) +
                             "_ppxf_variables.npy")
    ppxf_sigma = ppxf_variables[1]
    ppxf_vel = ppxf_variables[0]

    lmfit_data = spectra_data.lmfit_data(cube_id)
    z = lmfit_data['z']

    # range to consider is between CaH and Hdelta
    rtc = np.array([4000 * (1 + z), 4080 * (1 + z)])
    rtc_mask = ((ppxf_x > rtc[0]) & (ppxf_x < rtc[1]))

    y_masked = ppxf_y[rtc_mask]
    noise = np.std(y_masked)

    sn = y_masked / noise  # signal to noise for every signal pixel
    average_sn = np.average(sn)

    # pPXF: uncertainty for sigma_star
    bf_ppxf = np.load("uncert_ppxf/sigma_curve_best_values_ppxf.npy")
    uncert_ppxf = curve(average_sn, bf_ppxf)

    # lmfit: uncertainty for sigma_OII
    bf_lmfit = np.load("uncert_lmfit/sigma_curve_best_values_lmfit.npy")
    uncert_lmfit = curve(average_sn, bf_lmfit)

    return {'ppxf': uncert_ppxf, 'lmfit': uncert_lmfit, 'sn': sn}
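
The curve helper is not defined in this section; below is a minimal sketch consistent with the "a/x model" comment in Example #2, assuming the saved best-fit array holds a single coefficient a (a hypothetical reconstruction, not the actual helper):

import numpy as np

def curve(sn, best_fit):
    # hypothetical sketch: the fractional uncertainty falls off as a/x with
    # signal-to-noise, where "a" is the coefficient loaded from the
    # sigma_curve_best_values_*.npy files; the real helper may differ
    a = np.asarray(best_fit).ravel()[0]
    return a / sn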
Example #2
def data_obtainer(cube_id):
    # uses the cube ID to return:
    # Cube ID, RAF ID, RA, Dec, HST F606, z, V_*, sig_*, V_OII, sig_OII

    # load the combined catalogue
    file_read = catalogue_plots.read_cat("data/matched_catalogues.fits")
    catalogue_data = file_read['data']

    # locating row where catalogue data is stored
    #cat_loc = np.where(catalogue_data[:,375]==cube_id)
    #print(cat_loc)

    for i_object in range(len(catalogue_data)):
        curr_object = catalogue_data[i_object]
        curr_id = curr_object[375]

        if curr_id == cube_id:
            curr_raf_id = curr_object[7]
            curr_ra = curr_object[1]
            curr_dec = curr_object[2]
            curr_f606 = curr_object[64]
            curr_f606_err = curr_object[65]

            curr_f606_w_err = ufloat(curr_f606, curr_f606_err)
            curr_f606_w_err = '{:.1uSL}'.format(curr_f606_w_err)

    # obtaining the redshift and its error from our doublet fitting
    oii_data = spectra_data.lmfit_data(cube_id)
    curr_z = oii_data['z']
    curr_z_err = oii_data['err_z_alt']

    curr_z_w_err = ufloat(curr_z, curr_z_err)
    curr_z_w_err = '{:.1uSL}'.format(curr_z_w_err)

    c = 299792.458  # speed of light in kms^-1

    # loading "a" factors in a/x model
    a_sigma_ppxf = np.load("uncert_ppxf/sigma_curve_best_values_ppxf.npy")
    a_sigma_lmfit = np.load("uncert_lmfit/sigma_curve_best_values_lmfit.npy")
    a_vel_ppxf = np.load("uncert_ppxf/vel_curve_best_values_ppxf.npy")
    a_vel_lmfit = np.load("uncert_lmfit/vel_curve_best_values_lmfit.npy")

    # rounding to 4 decimal places
    #curr_z = np.around(curr_z, decimals=4)
    #curr_z_err = np.around(curr_z_err, decimals=4)

    # obtaining the velocities and velocity dispersions plus their errors
    fitted_data = np.load("data/ppxf_fitter_data.npy")
    fd_loc = np.where(fitted_data[:, 0] == cube_id)[0]

    if fd_loc.size == 0:
        # cubes that didn't have enough S/N for absorption line fitting with pPXF

        # calculate S/N for each cube
        # loading x and y data
        spec_data_x = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                              str(cube_id) + "_cbd_x.npy")
        spec_data_y = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                              str(cube_id) + "_cbs_y.npy")

        # S/N region is (4000,4080)*(1+z)
        # wavelength range mask
        mask_tc = ((spec_data_x > 4000 * (1 + curr_z)) &
                   (spec_data_x < 4080 * (1 + curr_z)))
        mask_applied = np.where(mask_tc)[0]

        if mask_applied.size == 0:
            wr_mask = ((spec_data_x > 3727 * (1 + curr_z) - 100) &
                       (spec_data_x < 3727 * (1 + curr_z) - 50))
        else:
            wr_mask = mask_tc

        stn_x = spec_data_x[wr_mask]  # signal-to-noise wavelength region
        stn_y = spec_data_y[wr_mask]

        stn_mean = np.mean(stn_y)
        stn_std = np.std(stn_y)

        stn = stn_mean / stn_std  # signal-to-noise
        curr_sn = stn

        sigma_oii = oii_data['sigma_gal']  # vel dispersion from lmfit
        sigma_oii = np.abs(
            (sigma_oii / (3727 * (1 + curr_z))) * c)  # convert to km/s
        sigma_oii_err = (a_sigma_lmfit / curr_sn) * sigma_oii

        vel_oii = c * np.log(1 + curr_z)
        vel_oii_err = (a_vel_lmfit / curr_sn) * vel_oii

        #print(sigma_oii, sigma_oii_err, vel_oii, vel_oii_err)

        vel_oii_w_err = ufloat(vel_oii, vel_oii_err)
        vel_oii_w_err = '{:.1ufSL}'.format(vel_oii_w_err)

        sigma_oii_w_err = ufloat(sigma_oii, sigma_oii_err)
        sigma_oii_w_err = '{:.1ufSL}'.format(sigma_oii_w_err)

        # printer for 35 galaxy sample table
        """
        print("C"+str(cube_id) + " & " + str(curr_raf_id) + " & " + str(curr_ra) + 
                " & " + str(curr_dec) + " & $" + str(curr_f606_w_err) + "$ & $" + 
                str(curr_z_w_err) + "$ & - & - & " + str(vel_oii_w_err) + " & "
                + str(sigma_oii_w_err) + " \\\\ \n")
        """

    else:
        fd_curr_cube = fitted_data[fd_loc][0][0]  # only need the first row of data

        curr_sn = fd_curr_cube[7]  # current S/N for cube

        sigma_stars = fd_curr_cube[2]
        sigma_stars_err = (a_sigma_ppxf / curr_sn) * sigma_stars
        sigma_oii = fd_curr_cube[1]
        sigma_oii_err = (a_sigma_lmfit / curr_sn) * sigma_oii

        vel_oii = c * np.log(1 + curr_z)
        vel_oii_err = (a_vel_lmfit / curr_sn) * vel_oii
        vel_stars = fd_curr_cube[14]
        vel_stars_err = (a_vel_ppxf / curr_sn) * vel_stars

        # converting to shorthand uncertainties notation
        vel_stars_w_err = ufloat(vel_stars, vel_stars_err)
        vel_stars_w_err = '{:.1ufSL}'.format(vel_stars_w_err)

        vel_oii_w_err = ufloat(vel_oii, vel_oii_err)
        vel_oii_w_err = '{:.1ufSL}'.format(vel_oii_w_err)

        sigma_stars_w_err = ufloat(sigma_stars, sigma_stars_err)
        sigma_stars_w_err = '{:.1ufSL}'.format(sigma_stars_w_err)

        sigma_oii_w_err = ufloat(sigma_oii, sigma_oii_err)
        sigma_oii_w_err = '{:.1ufSL}'.format(sigma_oii_w_err)

        # printer for 35 galaxy sample table
        # print into terminal the correct line to input into LaTeX
        """
        print("C"+str(cube_id) + " & " + str(curr_raf_id) + " & " + str(curr_ra) + 
                " & " + str(curr_dec) + " & $" + str(curr_f606_w_err) + "$ & $" + 
                str(curr_z_w_err) + "$ & $" + str(vel_stars_w_err) + "$ & $" + 
                str(sigma_stars_w_err) + "$ & $" + str(vel_oii_w_err) + "$ & $ " + 
                str(sigma_oii_w_err) + "$ \\\\ \n")
        """

        # printer for the S/N~4 and FWHM/2 cut table
        curr_sn = np.around(curr_sn, decimals=2)
        print("C" + str(cube_id) + " & " + str(curr_sn) + " & $" +
              str(sigma_stars_w_err) + "$ \\\\ \n")
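
The repeated ufloat/format pattern above comes from the uncertainties package; here is a minimal standalone illustration of the shorthand notation (the exact option strings used above may render slightly differently depending on the package version):

from uncertainties import ufloat

mag = ufloat(24.62, 0.13)  # a value with its 1-sigma error
# ".1u" keeps one significant digit on the uncertainty and "S" selects the
# compact shorthand notation, e.g. 24.6(1); an "L" option renders for LaTeX
print('{:.1uS}'.format(mag))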
Example #3
def graphs():
    catalogue = np.load("data/matched_catalogue.npy")
    catalogue = catalogue[catalogue[:, 8].argsort()]
    catalogue = catalogue[0:300, :]

    bright_objects = np.where(catalogue[:, 5] < 32.0)[0]

    avoid_objects = np.load("data/avoid_objects.npy")
    more_useless = np.array([474, 167, 1101, 1103, 744])
    #more_useless = np.array([0])

    # array to store cube id and signal to noise value
    usable_cubes = np.zeros(
        (len(bright_objects) - len(avoid_objects) - len(more_useless) + 1, 14))

    # usable_cubes structure
    # [0] : cube id
    # [1] : v-band mag from catalogue for object
    # [2] : median for absorption range
    # [3] : mean for absorption range
    # [4] : signal to noise
    # [5] : total counts for image
    # [6] : noise for image
    # [7] : B-I colour
    # [8] : I1 flux from model fitting
    # [9] : I2 flux from model fitting
    # [10] : c from model fitting
    # [11] : sigma_gal from model fitting
    # [12] : sigma_inst from model fitting
    # [13] : z from model fitting

    usable_count = 0
    for i_cube in bright_objects:
        curr_obj = catalogue[i_cube]
        cube_id = int(curr_obj[0])

        if (cube_id in avoid_objects or cube_id in more_useless
                or usable_count == np.shape(usable_cubes)[0]):
            pass
        else:
            usable_cubes[usable_count][0] = cube_id

            cube_x_data = np.load("cube_results/cube_" + str(int(cube_id)) +
                                  "/cube_" + str(int(cube_id)) + "_cbd_x.npy")
            cube_y_data = np.load("cube_results/cube_" + str(int(cube_id)) +
                                  "/cube_" + str(int(cube_id)) + "_cbs_y.npy")

            abs_region = [5500, 7000]
            abs_region_indexes = [
                find_nearest(cube_x_data, x) for x in abs_region
            ]
            ari = abs_region_indexes

            abs_region_x = cube_x_data[ari[0]:ari[1]]
            abs_region_y = cube_y_data[ari[0]:ari[1]]

            abs_flux_median = np.abs(np.nanmedian(abs_region_y))
            abs_flux_average = np.abs(np.nanmean(abs_region_y))

            usable_cubes[usable_count][2] = -2.5 * np.log10(abs_flux_median)
            usable_cubes[usable_count][3] = -2.5 * np.log10(abs_flux_average)

            cat_file = "data/matched_catalogue.npy"
            cat_data = np.load(cat_file)
            cat_curr_cube = np.where(cat_data[:, 0] == cube_id)[0]

            # column 5 in the catalogue contains F606W data, which is roughly V-band
            # (V-band has a midpoint wavelength of ~551 nm)
            vband_mag = cat_data[cat_curr_cube][0][5]
            usable_cubes[usable_count][1] = vband_mag

            # we want to select a region to calculate the signal to noise
            # parameters from lmfit
            lm_params = spectra_data.lmfit_data(cube_id)
            c = lm_params['c']
            i1 = lm_params['i1']
            i2 = lm_params['i2']
            sigma_gal = lm_params['sigma_gal']
            z = lm_params['z']
            sigma_inst = lm_params['sigma_inst']

            usable_cubes[usable_count][10] = c
            usable_cubes[usable_count][8] = i1
            usable_cubes[usable_count][9] = i2
            usable_cubes[usable_count][11] = sigma_gal
            usable_cubes[usable_count][13] = z
            usable_cubes[usable_count][12] = sigma_inst

            # plotting s/n vs mag
            lower_lambda = (1 + z) * 3700
            upper_lambda = (1 + z) * 4500

            # absorption region mask
            arm = (lower_lambda < cube_x_data) & (cube_x_data < upper_lambda)

            # cube y data for the absorption region - this is our signal
            ar_y = cube_y_data[arm]
            ar_x = cube_x_data[arm]

            cube_noise_data = cube_noise()
            spectrum_noise = cube_noise_data['spectrum_noise']

            # signal and noise
            ar_signal = np.median(ar_y)
            ar_noise = np.sum(spectrum_noise)

            signal_noise = np.abs(ar_signal / ar_noise)
            usable_cubes[usable_count][4] = signal_noise

            #print(cube_id, ar_signal, ar_noise, signal_noise)

            def abs_region_graphs():
                plt.figure()
                plt.plot(cube_x_data,
                         cube_y_data,
                         linewidth=0.5,
                         color="#000000")
                plt.plot(ar_x, ar_y, linewidth=0.3, color="#d32f2f")

                plt.axhline(ar_signal, linewidth=0.5, color="#212121")
                plt.axhline(ar_signal + ar_noise,
                            linewidth=0.5,
                            color="#212121",
                            alpha=0.75)
                plt.axhline(ar_signal - ar_noise,
                            linewidth=0.5,
                            color="#212121",
                            alpha=0.75)

                plt.ylim([-1000, 5000])

                #plt.title(r'\textbf{'+str(ar_signal)+' '+str(ar_noise)+' '+
                #str(signal_noise)+'}', fontsize=13)
                plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
                plt.ylabel(r'\textbf{Flux}', fontsize=13)
                plt.savefig(
                    "graphs/sanity_checks/absregions/absorption_region_" +
                    str(int(cube_id)) + ".pdf")

            #abs_region_graphs()

            def graphs_collapsed():
                cube_file = ("/Volumes/Jacky_Cao/University/level4/project/" +
                             "cubes_better/cube_" + str(cube_id) + ".fits")
                im_coll_data = cube_reader.image_collapser(cube_file)

                f, (ax1, ax2) = plt.subplots(1, 2)

                ax1.imshow(im_coll_data['median'], cmap='gray_r')
                ax1.set_title(r'\textbf{galaxy: median}', fontsize=20)
                ax1.set_xlabel(r'\textbf{Pixels}', fontsize=20)
                ax1.set_ylabel(r'\textbf{Pixels}', fontsize=20)

                ax2.imshow(im_coll_data['sum'], cmap='gray_r')
                ax2.set_title(r'\textbf{galaxy: sum}', fontsize=20)
                ax2.set_xlabel(r'\textbf{Pixels}', fontsize=20)
                ax2.set_ylabel(r'\textbf{Pixels}', fontsize=20)

                gal = patches.Rectangle((gal_region[0], gal_region[1]),
                                        gal_region[3] - gal_region[0],
                                        gal_region[2] - gal_region[1],
                                        linewidth=1,
                                        edgecolor='#b71c1c',
                                        facecolor='none')
                noise = patches.Rectangle((noise_region[0], noise_region[1]),
                                          noise_region[3] - noise_region[0],
                                          noise_region[2] - noise_region[1],
                                          linewidth=1,
                                          edgecolor='#1976d2',
                                          facecolor='none')

                # Add the patch to the Axes
                ax1.add_patch(gal)
                ax1.add_patch(noise)

                f.subplots_adjust(wspace=0.4)
                f.savefig("graphs/sanity_checks/stacked/stacked" +
                          str(int(cube_id)) + ".pdf")

            #graphs_collapsed()

            bband_mag = float(cat_data[cat_curr_cube][0][14])
            iband_mag = float(cat_data[cat_curr_cube][0][15])
            usable_cubes[usable_count][7] = bband_mag - iband_mag

            usable_count += 1

    #print(usable_cubes)

    unusable_cubes = ppxf_fitter.ignore_cubes()

    # --------------------------------------------------#

    # V-BAND VS. FLUX MAG
    fig, ax = plt.subplots()
    ax.scatter(usable_cubes[:, 2], usable_cubes[:, 1], s=7, color="#000000")

    cube_ids = usable_cubes[:, 0]
    for i, txt in enumerate(cube_ids):
        ax.annotate(int(txt), (usable_cubes[i][2], usable_cubes[i][1]))

    #ax.set_title(r'\textbf{V-band mag vs. flux-mag}', fontsize=13)
    ax.set_xlabel(r'\textbf{flux-mag}', fontsize=13)
    ax.set_ylabel(r'\textbf{V-band mag}', fontsize=13)
    plt.tight_layout()
    plt.savefig("graphs/sanity_checks/vband_vs_flux.pdf")

    # --------------------------------------------------#

    # S/N VS. V-BAND MAG
    fig, ax = plt.subplots()
    #ax.scatter(usable_cubes[:,1], usable_cubes[:,4], s=20, color="#000000")

    # plotting the usable cubes
    for i in range(len(usable_cubes[:, 0])):
        curr_cube = int(usable_cubes[:, 0][i])
        if curr_cube in unusable_cubes['ac']:
            ax.scatter(usable_cubes[:, 1][i],
                       usable_cubes[:, 4][i],
                       s=20,
                       color="#ffa000",
                       alpha=1.0,
                       marker="x",
                       label=r'\textbf{Not usable}')
        if curr_cube in unusable_cubes['ga']:
            ax.scatter(usable_cubes[:, 1][i],
                       usable_cubes[:, 4][i],
                       s=20,
                       color="#ffa000",
                       alpha=1.0,
                       marker="x")
        if curr_cube not in unusable_cubes['ac'] and usable_cubes[:, 1][i] < 25.0:
            # creating a LaTeX-ready table entry
            cube_data.data_obtainer(curr_cube)

            ax.scatter(usable_cubes[:, 1][i],
                       usable_cubes[:, 4][i],
                       s=20,
                       color="#00c853",
                       alpha=1.0,
                       marker="o",
                       zorder=3,
                       label=r'\textbf{Usable}')

    ax.fill_between(np.linspace(25, 28, 100),
                    0,
                    100,
                    alpha=0.2,
                    zorder=0,
                    facecolor="#ffcdd2")

    #ax.set_title(r'\textbf{S/N vs. V-band mag }', fontsize=13)
    ax.tick_params(labelsize=20)
    ax.set_xlabel(r'\textbf{HST V-band magnitude}', fontsize=20)
    ax.set_ylabel(r'\textbf{Spectrum S/N}', fontsize=20)
    ax.invert_xaxis()
    ax.set_yscale('log')
    ax.set_ylim([0.9, 100])
    ax.set_xlim([26, 20])

    # manually setting x-tick labels to 1 decimal place

    vband_x = np.array([26.0, 24.0, 22.0, 20.0])
    ax.set_xticks(vband_x)  # locations of ticks
    ax.set_xticklabels([
        r'\textbf{' + str(vband_x[0]) + '}',
        r'\textbf{' + str(vband_x[1]) + '}',
        r'\textbf{' + str(vband_x[2]) + '}',
        r'\textbf{' + str(vband_x[3]) + '}'
    ])

    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))

    ax.legend(by_label.values(),
              by_label.keys(),
              loc='lower right',
              prop={'size': 15})

    plt.tight_layout()
    plt.savefig("graphs/sn_vs_vband.pdf", bbox_inches="tight")

    # --------------------------------------------------#

    usable_cubes_no_oii = usable_cubes
    cubes_to_ignore = ppxf_fitter.ignore_cubes()['ac']

    cubes_to_ignore_indices = []

    for i_cube in range(len(cubes_to_ignore)):
        curr_cube = cubes_to_ignore[i_cube]
        #loc = np.where(usable_cubes[:,0] == curr_cube)[0].item()
        loc = np.where(usable_cubes[:, 0] == curr_cube)[0]
        cubes_to_ignore_indices.append(loc)

    cubes_to_ignore_indices = np.sort(
        np.asarray(cubes_to_ignore_indices))[::-1]

    for i_cube in range(len(cubes_to_ignore_indices)):
        index_to_delete = cubes_to_ignore_indices[i_cube]
        usable_cubes_no_oii = np.delete(usable_cubes_no_oii,
                                        index_to_delete,
                                        axis=0)

    oii_flux = f_doublet(usable_cubes_no_oii[:, 10], usable_cubes_no_oii[:, 8],
                         usable_cubes_no_oii[:, 9], usable_cubes_no_oii[:, 11],
                         usable_cubes_no_oii[:, 13], usable_cubes_no_oii[:, 12])

    # we want to convert the flux into proper (SI) units
    oii_flux = oii_flux * (10**(-20))  # catalogue unit: 10^-20 erg s^-1 cm^-2 A^-1
    oii_flux = oii_flux / (10**(-10) * 10**(-4))  # per-A -> per-m, per-cm^2 -> per-m^2

    # [OII] FLUX VS. GALAXY COLOUR
    fig, ax = plt.subplots()
    ax.scatter(usable_cubes_no_oii[:, 7], oii_flux, s=7, color="#000000")

    cube_ids = usable_cubes_no_oii[:, 0]
    for i, txt in enumerate(cube_ids):
        ax.annotate(int(txt), (usable_cubes_no_oii[i][7], oii_flux[i]),
                    alpha=0.2)

    #ax.set_title(r'\textbf{S/N vs. V-band mag }', fontsize=13)
    ax.tick_params(labelsize=20)
    ax.set_xlabel(r'\textbf{Galaxy Colour (B-I)}', fontsize=20)
    ax.set_ylabel(r'\textbf{[OII] Flux}', fontsize=20)
    plt.tight_layout()
    plt.savefig("graphs/oii_flux_vs_colour.pdf", bbox_inches="tight")
    plt.close("all")

    # [OII] VELOCITY DISPERSION VS. STELLAR MAG

    # REDSHIFT DISTRIBUTION OF [OII] EMITTERS
    fig, ax = plt.subplots()

    ax.hist(usable_cubes_no_oii[:, 13],
            bins=10,
            range=(0.3, 1.6),
            facecolor="#000000")
    print(np.shape(usable_cubes_no_oii[:, 13]))
    print(np.min(usable_cubes_no_oii[:, 13]), np.max(usable_cubes_no_oii[:, 13]))

    ax.tick_params(labelsize=20)
    ax.set_xlabel(r'\textbf{Redshift}', fontsize=20)
    ax.set_ylabel(r'\textbf{Number of galaxies}', fontsize=20)
    plt.tight_layout()
    plt.savefig("graphs/redshift_distribution_oii_emitters.pdf")
    plt.close("all")

    # OII LUMINOSITY VS. REDSHIFT
    # let's pick out the flux which is the smallest
    oii_flux_smallest = np.min(oii_flux)
    ofs_redshifts = np.arange(0, 1.6, 0.01)
    ofs_luminosity = luminosity_flux(ofs_redshifts, oii_flux_smallest)

    cubes_luminosity = luminosity_flux(usable_cubes_no_oii[:, 13], oii_flux)

    print(len(usable_cubes_no_oii[:, 13]))

    fig, ax = plt.subplots()
    ax.plot(ofs_redshifts, ofs_luminosity, linewidth=1.5, color="#9e9e9e")
    ax.scatter(usable_cubes_no_oii[:, 13],
               cubes_luminosity,
               s=20,
               color="#000000")

    cube_ids = usable_cubes_no_oii[:, 0]
    for i, txt in enumerate(cube_ids):
        pass
        #ax.annotate(int(txt), (usable_cubes_no_oii[i][13], cubes_luminosity[i]),
        #alpha=0.2)

    ax.fill_between(np.linspace(0.0, 0.3, 100),
                    0.007 * 10**44,
                    1.7 * 10**55,
                    alpha=0.2,
                    zorder=0,
                    facecolor="#ffcdd2")

    ax.tick_params(labelsize=20)
    ax.set_xlabel(r'\textbf{Redshift}', fontsize=20)
    ax.set_ylabel(r'\textbf{[OII] Luminosity (W)}', fontsize=20)
    ax.set_yscale('log')
    ax.set_xlim([0.0, 1.5])
    ax.set_ylim((0.5 * 10**45, 0.3 * 10**52))
    plt.tight_layout()
    plt.savefig("graphs/o_ii_luminosity_vs_redshift.pdf", bbox_inches="tight")
    plt.close("all")
Example #4
def fitting_plotter(cube_id, ranges, x_data, y_data, x_model, y_model, noise):
    # parameters from lmfit
    lm_params = spectra_data.lmfit_data(cube_id)
    c = lm_params['c']
    i1 = lm_params['i1']
    i2 = lm_params['i2']
    sigma_gal = lm_params['sigma_gal']
    z = lm_params['z']
    sigma_inst = lm_params['sigma_inst']

    # scaled down y data 
    y_data_scaled = y_data/np.median(y_data)

    # opening cube to obtain the segmentation data
    cube_file = ("/Volumes/Jacky_Cao/University/level4/project/cubes_better/cube_"
        + str(cube_id) + ".fits")
    hdu = fits.open(cube_file)
    segmentation_data = hdu[2].data
    seg_loc_rows, seg_loc_cols = np.where(segmentation_data == cube_id)
    signal_pixels = len(seg_loc_rows) 

    # the noise spectrum will be used in the chi-squared calculation
    noise_median = np.median(noise)
    noise_stddev = np.std(noise) 

    residual = y_data_scaled - y_model
    res_median = np.median(residual)
    res_stddev = np.std(residual)

    mask = ((residual < res_stddev) & (residual > -res_stddev)) 
 
    chi_sq = (y_data_scaled[mask] - y_model[mask])**2 / noise[mask]**2
    total_chi_sq = np.sum(chi_sq)

    total_points = len(chi_sq)
    reduced_chi_sq = total_chi_sq / total_points

    print("Cube " + str(cube_id) + " has a reduced chi-squared of " + 
            str(reduced_chi_sq))

    # spectral lines
    sl = spectra_data.spectral_lines()

    plt.figure()

    plt.plot(x_data, y_data_scaled, linewidth=1.1, color="#000000")
    plt.plot(x_data, y_data_scaled+noise_stddev, lw=0.1, c="#616161", alpha=0.1)
    plt.plot(x_data, y_data_scaled-noise_stddev, lw=0.1, c="#616161", alpha=0.1)
    
    # plotting over the OII doublet
    doublets = np.array([3727.092, 3729.875])  # [OII] vacuum wavelengths
    dblt_av = np.average(doublets) * (1+z)

    if (ranges[0] > dblt_av):
        pass
    else:
        dblt_x_mask = ((x_data > dblt_av-20) & (x_data < dblt_av+20))
        doublet_x_data = x_data[dblt_x_mask]
        doublet_data = spectra_data.f_doublet(doublet_x_data, c, i1, i2, sigma_gal, 
                z, sigma_inst)
        doublet_data = doublet_data / np.median(y_data)
        plt.plot(doublet_x_data, doublet_data, linewidth=0.5, color="#9c27b0")

    max_y = np.max(y_data_scaled) 

    plt.plot(x_model, y_model, linewidth=1.5, color="#b71c1c")

    residuals_mask = (residual > res_stddev) 
    rmask = residuals_mask

    #plt.scatter(x_data[rmask], residual[rmask], s=3, color="#f44336", alpha=0.5)
    plt.scatter(x_data[mask], residual[mask]-1, s=3, color="#43a047")

    plt.xlim([ranges[0], ranges[1]])

    plt.tick_params(labelsize=15)
    plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=15)
    plt.ylabel(r'\textbf{Relative Flux}', fontsize=15)
    plt.tight_layout()
 
    # range specifier for file name
    range_string = str(ranges[0]) + "_" + str(ranges[1])

    plt.savefig("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id))
            + "_" + range_string + "_fitted.pdf")

    plt.close("all")
Example #5
def kinematics_sdss(cube_id, y_data_var, fit_range):     
    file_loc = "ppxf_results" + "/cube_" + str(int(cube_id))
    if not os.path.exists(file_loc):
        os.mkdir(file_loc) 

    # reading cube_data
    cube_file = ("/Volumes/Jacky_Cao/University/level4/project/cubes_better/cube_"
        + str(cube_id) + ".fits")
    hdu = fits.open(cube_file)

    spectra = cube_reader.spectrum_creator(cube_file)
     
    # using our redshift estimate from lmfit
    lmfitd = spectra_data.lmfit_data(cube_id)
    z = lmfitd['z']

    cube_x_data = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" + 
        str(int(cube_id)) + "_cbd_x.npy") 
    if (np.sum(y_data_var) == 0):
        cube_y_data = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" + 
            str(int(cube_id)) + "_cbs_y.npy")
    else:
        cube_y_data = y_data_var

    #cube_x_data = cube_x_data / (1+z)

    cube_x_original = cube_x_data
    cube_y_original = cube_y_data

    initial_mask = (cube_x_data > 3540 * (1+z))
    cube_x_data = cube_x_original[initial_mask] 
    cube_y_data = cube_y_original[initial_mask]

    # calculating the signal to noise
    sn_region = np.array([4000, 4080]) * (1+z) 
    sn_region_mask = ((cube_x_data > sn_region[0]) & (cube_x_data < sn_region[1]))
    
    cube_y_sn_region = cube_y_data[sn_region_mask]
    cy_sn_mean = np.mean(cube_y_sn_region)
    cy_sn_std = np.std(cube_y_sn_region)
    cy_sn = cy_sn_mean / cy_sn_std

    # cube noise
    cube_noise_data = cube_noise()
    spectrum_noise = cube_noise_data['spectrum_noise'] 
    spec_noise = spectrum_noise[initial_mask]

    lamRange = np.array([np.min(cube_x_data), np.max(cube_x_data)]) 
    specNew, logLam, velscale = log_rebin(lamRange, cube_y_data)
    lam = np.exp(logLam)
    
    loglam = np.log10(lam)
    # Only use the wavelength range in common between galaxy and stellar library.
    mask = (loglam > np.log10(3460)) & (loglam < np.log10(9464))
    flux = specNew[mask]
 
    galaxy = flux/np.median(flux)   # Normalize spectrum to avoid numerical issues
    loglam_gal = loglam[mask]
    lam_gal = 10**loglam_gal

    # galaxy spectrum not scaled 
    galaxy_ns = flux

    segmentation_data = hdu[2].data
    seg_loc_rows, seg_loc_cols = np.where(segmentation_data == cube_id)
    signal_pixels = len(seg_loc_rows) 

    spec_noise = spec_noise[mask]

    noise = np.nan_to_num((spec_noise * np.sqrt(signal_pixels)) / 
            np.abs(np.median(flux)))

    # Considering specific ranges
    if (isinstance(fit_range, str)):
        pass
    else: 
        fit_range = fit_range * (1+z)
        rtc_mask = ((loglam_gal > np.log10(fit_range[0])) &
                (loglam_gal < np.log10(fit_range[1])))

        # outside the requested range the noise is set to 1.0, down-weighting
        # those pixels in the pPXF fit; loglam_gal is used so the mask lines
        # up with the (already masked) noise array
        noise[~rtc_mask] = 1.0

    # sky noise
    sky_noise = cube_reader.sky_noise("data/skyvariance_csub.fits")
    skyNew, skyLogLam, skyVelScale = log_rebin(lamRange, sky_noise[initial_mask])
    skyNew = skyNew[mask]

    c = 299792.458                  # speed of light in km/s
    frac = lam_gal[1]/lam_gal[0]    # Constant lambda fraction per pixel
    dlam_gal = (frac - 1)*lam_gal   # Size of every pixel in Angstrom

    data_shape = np.shape(galaxy)
    wdisp = np.full(data_shape, 1, dtype=float) # Intrinsic dispersion of every pixel

    sky_sigma_inst = np.load("data/sigma_inst.npy")
    fwhm_gal = 2.355*sky_sigma_inst*wdisp  # FWHM = 2*sqrt(2 ln2)*sigma ~ 2.355*sigma

    velscale = np.log(frac)*c       # Constant velocity scale in km/s per pixel

    # If the galaxy is at significant redshift, one should bring the galaxy
    # spectrum roughly to the rest-frame wavelength before calling pPXF
    # (see Sec. 2.4 of Cappellari 2017). In practice there is no need to
    # modify the spectrum in any way, given that a redshift corresponds to a
    # linear shift of the log-rebinned spectrum. One just needs to compute
    # the wavelength range in the rest-frame and adjust the instrumental
    # resolution of the galaxy observations. This is done with the following
    # two commented lines:
    
    #lam_gal = lam_gal/(1+z)  # Compute approximate restframe wavelength
    #fwhm_gal = fwhm_gal/(1+z)   # Adjust resolution in Angstrom

    # Read the list of filenames from the Single Stellar Population library
    # by Vazdekis (2010, MNRAS, 404, 1639) http://miles.iac.es/. A subset
    # of the library is included for this example with permission

    # NOAO Coudé templates
    template_set = glob.glob("noao_templates/*.fits")
    fwhm_tem = 1.35 

    # Extended MILES templates
    #template_set = glob.glob('miles_models/s*.fits') 
    #fwhm_tem = 2.5
    
    # Jacoby templates
    #template_set = glob.glob('jacoby_models/jhc0*.fits')
    #fwhm_tem = 4.5 # instrumental resolution in Ångstroms.

    # Default templates
    #template_set = glob.glob('miles_models/Mun1.30Z*.fits')
    #fwhm_tem = 2.51 # Vazdekis+10 spectra have a constant resolution FWHM of 2.51A.

    # Extract the wavelength range and logarithmically rebin one spectrum
    # to the same velocity scale of the SDSS galaxy spectrum, to determine
    # the size needed for the array which will contain the template spectra.
    #
    hdu = fits.open(template_set[0])
    
    noao_data = hdu[1].data[0]
    ssp = noao_data[1]

    lam_temp = noao_data[0]

    lamRange_temp = [np.min(lam_temp), np.max(lam_temp)]
    sspNew = util.log_rebin(lamRange_temp, ssp, velscale=velscale)[0]
    templates = np.empty((sspNew.size, len(template_set)))

    # Interpolates the galaxy spectral resolution at the location of every pixel
    # of the templates. Outside the range of the galaxy spectrum the resolution
    # will be extrapolated, but this is irrelevant as those pixels cannot be
    # used in the fit anyway.
    fwhm_gal = np.interp(lam_temp, lam_gal, fwhm_gal)

    # Convolve the whole library of spectral templates
    # with the quadratic difference between the data and the
    # template instrumental resolution. Logarithmically rebin
    # and store each template as a column in the array TEMPLATES.

    # Quadratic sigma difference in pixels 
    # The formula below is rigorously valid if the shapes of the
    # instrumental spectral profiles are well approximated by Gaussians.
    #
    # In the line below, fwhm_dif is set to zero when fwhm_gal < fwhm_tem.
    # In principle this should never happen; if it does, a higher-resolution
    # template should be used.
    #
    fwhm_dif = np.sqrt((fwhm_gal**2 - fwhm_tem**2).clip(0)) 
    
    spacing = lam_temp[1] - lam_temp[0]
    sigma = fwhm_dif/2.355/spacing # Sigma difference in pixels
    for j, fname in enumerate(template_set):
        hdu = fits.open(fname)
        #ssp = hdu[0].data

        noao_data = hdu[1].data[0]
        ssp = noao_data[1]

        ssp = util.gaussian_filter1d(ssp, sigma)  # perform convolution with variable sigma
        sspNew = util.log_rebin(lamRange_temp, ssp, velscale=velscale)[0]
        templates[:, j] = sspNew/np.median(sspNew) # Normalizes templates

    # The galaxy and the template spectra do not have the same starting wavelength.
    # For this reason an extra velocity shift DV has to be applied to the template
    # to fit the galaxy spectrum. We remove this artificial shift by using the
    # keyword VSYST in the call to PPXF below, so that all velocities are
    # measured with respect to DV. This assumes the redshift is negligible.
    # In the case of a high-redshift galaxy one should de-redshift its
    # wavelength to the rest frame before using the line below (see above).
    #
    c = 299792.458

    # attempt at de-redshifting the data but it does not work
    #dv = np.log(lam_temp[0]/(lam_gal[0]*(1+z)))*c    # km/s
    #goodpixels = util.determine_goodpixels(np.log(lam_gal*(1+z)), lamRange_temp, z) 

    dv = np.log(lam_temp[0]/lam_gal[0])*c    # km/s
    goodpixels = util.determine_goodpixels(np.log(lam_gal), lamRange_temp, z)

    # Here the actual fit starts. The best fit is plotted on the screen.
    # Gas emission lines are excluded from the pPXF fit using the GOODPIXELS keyword.
    #
    vel = c*np.log(1 + z)   # eq.(8) of Cappellari (2017)
    start = [vel, 200.]  # (km/s), starting guess for [V, sigma]

    t = process_time()

    # don't run pPXF plotter if custom spectra data is being processed
    if np.sum(y_data_var) != 0:
        plot_var = False
    else:
        plot_var = True

    f = io.StringIO()
    with redirect_stdout(f):
        pp = ppxf(templates, galaxy, noise, velscale, start, sky=skyNew,
            goodpixels=goodpixels, plot=plot_var, moments=4,
            degree=12, vsyst=dv, clean=True, lam=lam_gal) 

    ppxf_variables = pp.sol
    ppxf_errors = pp.error*np.sqrt(pp.chi2)
 
    red_chi2 = pp.chi2
    best_fit = pp.bestfit

    x_data = cube_x_data[mask]
    y_data = cube_y_data[mask]

    print(ppxf_variables)
    print(ppxf_errors)
    #plt.show()
    
    if ((np.sum(y_data_var) == 0) and isinstance(fit_range, str)):
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_ppxf_variables", 
                ppxf_variables) 
 
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_galaxy", 
                galaxy)

        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_lamgal", lam_gal) 
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_flux", flux)
 
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_x", x_data)
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_y", y_data)

        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_noise", noise)
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_spec_noise", spec_noise)

        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_not_scaled", galaxy_ns)

        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_goodpixels", goodpixels)

        # if this is the best fit, i.e. the perturbation is 0, save everything

        kinematics_file = open(file_loc + "/cube_" + str(int(cube_id)) +
            "_kinematics.txt", 'w')

        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_model", best_fit)

        print("Rough reduced chi-squared from ppxf: " + str(pp.chi2))
       
        data_to_file = f.getvalue()

        kinematics_file.write(data_to_file)
        kinematics_file.write("")

        kinematics_file.write("Formal errors: \n")
        kinematics_file.write("     dV    dsigma   dh3      dh4 \n")
        kinematics_file.write("".join("%8.2g" % f for f in pp.error*np.sqrt(pp.chi2)) 
                + "\n")

        kinematics_file.write('Elapsed time in PPXF: %.2f s' % (process_time() - t) 
                + "\n")

        plt.tight_layout()
        graph_loc = "ppxf_results" + "/cube_" + str(int(cube_id))
        if not os.path.exists(graph_loc):
            os.mkdir(graph_loc) 

        kinematics_graph = (graph_loc + "/cube_" + str(int(cube_id)) + 
                "_kinematics.pdf")
        plt.savefig(kinematics_graph)
        #plt.show()
        plt.close("all")
    if not isinstance(fit_range, str):
        # saving graphs if not using the original full range
        fitting_plotter(cube_id, fit_range, x_data, y_data, lam_gal, best_fit, noise)
        
        # goodpixels range specifier for array
        np.save(file_loc + "/cube_" + str(int(cube_id)) + "_goodpixels_" + 
                str(fit_range[0]) + "_" + str(fit_range[1]), goodpixels)

    plt.close("all")

    # save the goodpixels array depending on whether a reduced range is being used

    # If the galaxy is at significant redshift z and the wavelength has been
    # de-redshifted with the two commented lines near the beginning of this
    # procedure, the best-fitting redshift is now given by the following
    # line (equation 2 of Cappellari et al. 2009, ApJ, 704, L34):
    #
    print()
    print('Best-fitting redshift z: '+str((z + 1)*(1 + ppxf_variables[0]/c) - 1))

    return {'reduced_chi2': red_chi2, 'noise': noise, 'variables': ppxf_variables,
            'y_data': galaxy, 'x_data': lam_gal, 'redshift': z, 
            'y_data_original': cube_y_original, 'non_scaled_y': galaxy_ns,
            'model_data': best_fit, 'noise_original': spec_noise,
            'errors': ppxf_errors}
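
The log-rebinning step above is central to pPXF: resampling onto a grid that is constant in ln(lambda) turns a redshift into a uniform pixel shift, with a constant velocity scale c * d(ln lambda) per pixel. A minimal standalone illustration (the import path assumes the pip-installed ppxf package):

import numpy as np
from ppxf.ppxf_util import log_rebin

lam_range = np.array([4800.0, 5600.0])  # linear wavelength range in Angstroms
spec = np.ones(800)  # flat dummy spectrum
spec_new, log_lam, velscale = log_rebin(lam_range, spec)
# velscale (km/s per pixel) equals c times the constant ln-lambda step
print(velscale, 299792.458 * (log_lam[1] - log_lam[0]))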
Example #6
def voronoi_plotter(cube_id):
    vb_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                      str(cube_id) + "_binned.npy")  # Voronoi binned data

    ppxf_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                        str(cube_id) +
                        "_curr_voronoi_ppxf_results.npy")  # pPXF data
    lmfit_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                         str(cube_id) +
                         "_voronoi_lmfit_results.npy")  # lmfit data

    sn_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                      str(cube_id) +
                      "_curr_voronoi_sn_results.npy")  # signal to noise data

    oc_data = np.load("data/cubes_better/cube_" + str(int(cube_id)) + ".npy")

    # Array to store various maps
    # [0] : pPXF stellar velocity map - de-redshifted
    # [1] : pPXF stellar velocity dispersion map
    # [2] : lmfit gas velocity map - de-redshifted
    # [3] : lmfit gas velocity dispersion map
    # [4] : S/N map
    # [5] : pPXF velocity errors map
    # [6] : lmfit velocity errors map
    # [7] : Voronoi ID map
    # [8] : pPXF stellar velocity map - redshifted
    # [9] : lmfit gas velocity map - redshifted
    binned_data = np.zeros([10, np.shape(oc_data)[1], np.shape(oc_data)[0]])

    # obtaining redshift from integrated galaxy lmfit data
    lmfit_fitting = spectra_data.lmfit_data(cube_id)
    z = lmfit_fitting['z']

    # calculating velocity of galaxy based on redshift from integrated spectrum
    c = 299792.458  # speed of light in kms^-1
    vel_gal = c * np.log(1 + z)  # galaxy velocity

    # velocity of galaxy for the central pixel
    cen_pix_vel_ppxf = ppxf_data[1][2]
    cen_pix_vel_lmfit = lmfit_data[1][2]

    # adding 1 to ignore the "0" bins, which are areas outside the segmentation map
    vb_data[:, 2] = vb_data[:, 2] + 1

    curr_row = 0
    for i_x in range(np.shape(oc_data)[0]):
        for i_y in range(np.shape(oc_data)[1]):
            vb_id = vb_data[curr_row][2]
            binned_data[7][i_y][i_x] = vb_id

            # S/N variable
            sn_loc = np.where(sn_data[:, 1] == vb_id)[0]
            sn_vars = sn_data[sn_loc][0]

            binned_data[4][i_y][i_x] = sn_vars[2]  # current signal-to-noise

            # pPXF variables and errors
            ppxf_loc = np.where(ppxf_data[:, 1] == vb_id)[0]
            ppxf_vars = ppxf_data[ppxf_loc][0]

            binned_data[8][i_y][i_x] = ppxf_vars[2]  # redshifted velocity
            binned_data[5][i_y][i_x] = ppxf_vars[4]  # pPXF velocity error

            # lmfit variables
            lmfit_loc = np.where(lmfit_data[:, 1] == vb_id)[0]
            lmfit_vars = lmfit_data[lmfit_loc][0]

            binned_data[9][i_y][i_x] = lmfit_vars[2]  # redshifted velocity
            binned_data[6][i_y][i_x] = lmfit_vars[4]  # lmfit velocity error

            # storing values which are only high enough S/N
            if sn_vars[2] > 4:
                # rest velocities for pPXF and lmfit
                binned_data[0][i_y][i_x] = ppxf_vars[2] - cen_pix_vel_ppxf
                binned_data[2][i_y][i_x] = lmfit_vars[2] - cen_pix_vel_lmfit
            if sn_vars[2] > 7:
                # velocity dispersions for pPXF and lmfit
                binned_data[1][i_y][i_x] = ppxf_vars[3]  # velocity dispersion
                binned_data[3][i_y][i_x] = lmfit_vars[3]  # velocity dispersion

            curr_row += 1

    # loading the binary segmentation map
    seg_map = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                      str(int(cube_id)) + "_segmentation.npy")

    # rotate the maps and save them as a numpy array instead of during imshow plotting
    binned_data = np.fliplr(np.rot90(binned_data, 1, (1, 2)))
    binned_data = binned_data * seg_map

    np.save(
        "cube_results/cube_" + str(int(cube_id)) + "/cube_" +
        str(int(cube_id)) + "_maps.npy", binned_data)

    ppxf_vel_data = binned_data[0]
    ppxf_sigma_data = binned_data[1]

    lmfit_vel_data = binned_data[2]
    lmfit_sigma_data = binned_data[3]

    curr_sn_data = binned_data[4]

    ppxf_vel_unique = np.unique(ppxf_vel_data)
    ppxf_vel_data[ppxf_vel_data == 0] = np.nan

    ppxf_sigma_unique = np.unique(ppxf_sigma_data)
    ppxf_sigma_data[ppxf_sigma_data == 0] = np.nan

    lmfit_vel_unique = np.unique(lmfit_vel_data)
    lmfit_vel_data[lmfit_vel_data == 0] = np.nan

    lmfit_sigma_unique = np.unique(lmfit_sigma_data)
    lmfit_sigma_data[lmfit_sigma_data == 0] = np.nan

    curr_sn_data_unique = np.unique(curr_sn_data)
    curr_sn_data[curr_sn_data == 0] = np.nan

    # setting nan values to black
    #current_cmap = plt.cm.jet
    #current_cmap.set_bad(color='black')

    f, (ax1, ax2) = plt.subplots(1, 2)
    fax1 = ax1.imshow(ppxf_vel_data,
                      cmap='jet',
                      vmin=ppxf_vel_unique[1],
                      vmax=ppxf_vel_unique[-2])
    ax1.tick_params(labelsize=20)
    ax1.set_title(r'\textbf{$V_{*}$ (kms$^{-1}$)}', fontsize=20)

    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fcbar = f.colorbar(fax1, ax=ax1, cax=cax)
    fcbar.ax.tick_params(labelsize=20)

    fax2 = ax2.imshow(ppxf_sigma_data,
                      cmap='jet',
                      vmin=ppxf_sigma_unique[1],
                      vmax=ppxf_sigma_unique[-1])
    ax2.tick_params(labelsize=20)
    ax2.set_title(r'\textbf{$\sigma_{*}$ (kms$^{-1}$)}', fontsize=20)

    divider = make_axes_locatable(ax2)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fcbar = f.colorbar(fax2, ax=ax2, cax=cax)
    fcbar.ax.tick_params(labelsize=20)

    f.tight_layout()
    f.savefig("cube_results/cube_" + str(cube_id) + "/cube_" + str(cube_id) +
              "_ppxf_maps.pdf",
              bbox_inches="tight")

    g, (ax3, ax4) = plt.subplots(1, 2)
    gax3 = ax3.imshow(lmfit_vel_data,
                      cmap='jet',
                      vmin=lmfit_vel_unique[1],
                      vmax=lmfit_vel_unique[-2])
    ax3.tick_params(labelsize=20)
    ax3.set_title(r'\textbf{$V_{OII}$ (kms$^{-1}$)}', fontsize=20)

    divider = make_axes_locatable(ax3)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    gcbar = g.colorbar(gax3, ax=ax3, cax=cax)
    gcbar.ax.tick_params(labelsize=20)

    gax4 = ax4.imshow(lmfit_sigma_data,
                      cmap='jet',
                      vmin=lmfit_sigma_unique[1],
                      vmax=lmfit_sigma_unique[-1])
    ax4.tick_params(labelsize=20)
    ax4.set_title(r'\textbf{$\sigma_{OII}$ (kms$^{-1}$)}', fontsize=20)

    divider = make_axes_locatable(ax4)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    gcbar = g.colorbar(gax4, ax=ax4, cax=cax)
    gcbar.ax.tick_params(labelsize=20)

    g.tight_layout()
    g.savefig("cube_results/cube_" + str(cube_id) + "/cube_" + str(cube_id) +
              "_lmfit_maps.pdf",
              bbox_inches="tight")

    h, (ax5) = plt.subplots(1, 1)
    hax5 = ax5.imshow(curr_sn_data,
                      cmap='jet',
                      vmin=np.min(sn_data[:, 2]),
                      vmax=np.max(sn_data[:, 2]))

    #ax5.axis('off')
    ax5.tick_params(labelsize=20)
    #ax5.set_title(r'\textbf{S/N Map}', fontsize=20)

    divider = make_axes_locatable(ax5)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    hcbar = h.colorbar(hax5, ax=ax5, cax=cax)
    hcbar.ax.tick_params(labelsize=20)
    hcbar.ax.set_ylabel(r"$\bf{S/N}$", fontsize=20, rotation=270, labelpad=30)

    h.tight_layout()
    h.savefig("cube_results/cube_" + str(cube_id) + "/cube_" + str(cube_id) +
              "_signal_noise_map.pdf",
              bbox_inches="tight")
Example #7
def voronoi_runner():
    # Running to obtain results from pPXF and OII fitting
    cf = ppxf_fitter.cat_func()
    catalogue = cf['cat']  # calling sorted catalogue from catalogue function
    bright_objects = cf['bo']

    uc = ppxf_fitter.usable_cubes(catalogue, bright_objects)  # usable cubes
    #uc = uc[11:]
    uc = np.array([849])
    print(uc)
    for i_cube in range(len(uc)):
        cube_id = int(uc[i_cube])

        l, (sax1) = plt.subplots(1, 1)  # spectra and pPXF on same plot
        m, (max1) = plt.subplots(1, 1)  # pPXF plots
        n, (nax1) = plt.subplots(1, 1)  # spectra plots

        # loading the MUSE spectroscopic data
        file_name = (
            "/Volumes/Jacky_Cao/University/level4/project/cubes_better/" +
            "cube_" + str(cube_id) + ".fits")
        fits_file = cube_reader.read_file(file_name)

        # image data from the fits file is the "wrong way around"
        image_data = fits_file[1]
        image_data = np.fliplr(np.rot90(image_data, 1, (1, 2)))

        # loading Voronoi map - need to add 1 to distinguish between 1st bin and
        # the 'off' areas as defined by binary segmentation map
        vor_map = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                          str(int(cube_id)) + "_voronoi_map.npy") + 1

        # loading the binary segmentation map
        seg_map = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                          str(int(cube_id)) + "_segmentation.npy")

        vor_map = vor_map * seg_map
        voronoi_unique = np.unique(vor_map)

        # loading the wavelength solution
        ws_data = cube_reader.wavelength_solution(file_name)
        wl_sol = np.linspace(ws_data['begin'], ws_data['end'],
                             ws_data['steps'])

        # open the voronoi binned data
        voronoi_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                               str(cube_id) + "_binned.npy")

        # Array which stores cube_id, VorID, S/N for region
        cube_sn_results = np.zeros([len(voronoi_unique), 3])

        # Array which stores cube_id, VorID, pPXF vel, pPXF sigma, vel err
        cube_ppxf_results = np.zeros([len(voronoi_unique), 5])

        # Array which stores cube_id, VorID, lmfit vel, lmfit sigma
        # (converted), and lmfit vel error
        cube_lmfit_results = np.zeros([len(voronoi_unique), 5])

        # Applying the segmentation map to the Voronoi map
        # I want to work with the converted map

        for i_vid in range(len(voronoi_unique)):
            if i_vid == 0:
                # ignoring the 'off' areas of the Voronoi map
                pass
            else:
                curr_vid = int(voronoi_unique[i_vid])
                print("Considering cube_" + str(cube_id) + " and Voronoi ID " +
                      str(curr_vid))

                # find what pixels are at the current voronoi id
                curr_where = np.where(vor_map == curr_vid)

                # create a single spectra from the found pixels
                spectra = np.zeros(
                    [len(curr_where[0]),
                     np.shape(image_data)[0]])
                print(np.shape(spectra))

                # curr_where is a (rows, cols) tuple from np.where, so the
                # pixel count is the length of its first element
                if len(curr_where[0]) == 1:
                    pixel_x = int(curr_where[0][0])
                    pixel_y = int(curr_where[1][0])

                    single_spec = image_data[:][:, pixel_y][:, pixel_x]
                    spectra[0] = single_spec
                else:
                    spec_counter = 0
                    for i_x in range(len(curr_where[0])):
                        # looking at current x and y positions
                        pixel_x = int(curr_where[0][i_x])
                        pixel_y = int(curr_where[1][i_x])

                        curr_spec = image_data[:][:, pixel_y][:, pixel_x]

                        # saving spectra into specific row of spectra array
                        spectra[spec_counter] = curr_spec

                        spec_counter += 1

                spectra = np.nansum(spectra, axis=0)

                # calculate the S/N on the new generated spectra
                # parameters from lmfit
                lm_params = spectra_data.lmfit_data(cube_id)
                z = lm_params['z']

                region = np.array([4000, 4080]) * (1 + z)
                region_mask = ((wl_sol > region[0]) & (wl_sol < region[1]))

                # masking useful region
                masked_wlr = wl_sol[region_mask]
                masked_spec = spectra[region_mask]

                signal = masked_spec
                noise = np.std(masked_spec)

                signal_noise = np.abs(np.average(signal / noise))

                cube_sn_results[i_vid][0] = int(cube_id)
                cube_sn_results[i_vid][1] = int(i_vid)
                cube_sn_results[i_vid][2] = signal_noise  # keep S/N as a float

                np.save(
                    "cube_results/cube_" + str(cube_id) + "/cube_" +
                    str(cube_id) + "_curr_voronoi_sn_results.npy",
                    cube_sn_results)

                # run pPXF on the final spectra and store results
                if np.isnan(np.sum(spectra)):
                    ppxf_vel = 0
                    ppxf_sigma = 0
                    ppxf_vel_err = 0  # otherwise unbound when spectra are all-NaN
                else:
                    ppxf_run = ppxf_fitter_kinematics_sdss.kinematics_sdss(
                        cube_id, spectra, "all")
                    plt.close("all")

                    # variables from pPXF
                    ppxf_vars = ppxf_run['variables']
                    ppxf_vel = ppxf_vars[0]
                    ppxf_sigma = ppxf_vars[1]

                    # errors from pPXF
                    ppxf_errs = ppxf_run['errors']
                    ppxf_vel_err = ppxf_errs[0]

                    # use the returned data from pPXF to plot the spectra
                    x_data = ppxf_run['x_data']
                    y_data = ppxf_run['y_data']
                    best_fit = ppxf_run['model_data']

                    # plot individual spectra
                    indiv_spec_dir = ("cube_results/cube_" + str(cube_id) +
                                      "/voronoi_spectra")

                    if not os.path.exists(indiv_spec_dir):
                        os.mkdir(indiv_spec_dir)

                    t, (tax1) = plt.subplots(1, 1)
                    tax1.plot(x_data, y_data, lw=1.5, c="#000000")
                    tax1.plot(x_data, best_fit, lw=1.5, c="#d32f2f")

                    tax1.tick_params(labelsize=20)
                    tax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
                    tax1.set_ylabel(r'\textbf{Relative Flux}', fontsize=20)

                    t.tight_layout()
                    t.savefig(indiv_spec_dir + "/cube_" + str(cube_id) + "_" +
                              str(i_vid) + "_spectra.pdf")
                    plt.close("all")

                    # plotting initial spectra
                    sax1.plot(x_data, y_data, lw=1.5, c="#000000")
                    # plotting pPXF best fit
                    sax1.plot(x_data, best_fit, lw=1.5, c="#d32f2f")

                    max1.plot(x_data,
                              best_fit + 150 * i_vid,
                              lw=1.5,
                              c="#d32f2f")
                    nax1.plot(x_data,
                              y_data + 1000 * i_vid,
                              lw=1.5,
                              c="#000000")

                # Storing data into cube_ppxf_results array
                cube_ppxf_results[i_vid][0] = int(cube_id)
                cube_ppxf_results[i_vid][1] = int(i_vid)
                cube_ppxf_results[i_vid][2] = ppxf_vel
                cube_ppxf_results[i_vid][3] = ppxf_sigma
                cube_ppxf_results[i_vid][4] = ppxf_vel_err

                np.save(
                    "cube_results/cube_" + str(cube_id) + "/cube_" +
                    str(cube_id) + "_curr_voronoi_ppxf_results.npy",
                    cube_ppxf_results)

                # fitting OII doublet for the final spectra
                # wavelength solution
                x_data = np.load("cube_results/cube_" + str(cube_id) +
                                 "/cube_" + str(cube_id) + "_cbd_x.npy")

                # loading redshift and sigma_inst
                doublet_params = spectra_data.lmfit_data(cube_id)
                z = doublet_params['z']
                sigma_inst = doublet_params['sigma_inst']

                # masking out doublet region
                x_mask = ((x_data > (1 + z) * 3600) & (x_data < (1 + z) * 3750))
                x_masked = x_data[x_mask]
                y_masked = spectra[x_mask]

                oii_doublets = [3727.092, 3729.875]

                dbt_params = Parameters()
                dbt_params.add('c', value=0)
                dbt_params.add('i1', value=np.max(y_masked), min=0.0)
                dbt_params.add('r', value=1.3, min=0.5, max=1.5)
                dbt_params.add('i2', expr='i1/r', min=0.0)
                dbt_params.add('sigma_gal', value=3)
                dbt_params.add('z', value=z)
                dbt_params.add('sigma_inst', value=sigma_inst, vary=False)

                dbt_model = Model(spectra_data.f_doublet)
                dbt_result = dbt_model.fit(y_masked,
                                           x=x_masked,
                                           params=dbt_params)

                best_result = dbt_result.best_values
                best_z = best_result['z']
                best_sigma = best_result['sigma_gal']

                c = 299792.458  # speed of light in kms^-1

                lmfit_vel = c * np.log(1 + best_z)
                lmfit_sigma = (best_sigma / (3727 * (1 + best_z))) * c

                lmfit_errors = dbt_result.params
                z_err = lmfit_errors['z'].stderr

                if z_err is None:
                    z_err = 0.0

                lmfit_vel_err = c * np.log(1 + z_err)

                # indexing data into lmfit array
                cube_lmfit_results[i_vid][0] = int(cube_id)
                cube_lmfit_results[i_vid][1] = int(i_vid)
                cube_lmfit_results[i_vid][2] = lmfit_vel
                cube_lmfit_results[i_vid][3] = lmfit_sigma
                cube_lmfit_results[i_vid][4] = lmfit_vel_err

        sax1.tick_params(labelsize=20)
        sax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
        sax1.set_ylabel(r'\textbf{Relative Flux}', fontsize=20)
        l.tight_layout()
        l.savefig("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                  str(int(cube_id)) + "_voronoi_spectra_stacked.pdf")

        max1.tick_params(labelsize=20)
        max1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
        max1.set_ylabel(r'\textbf{Relative Flux}', fontsize=20)
        m.tight_layout()
        m.savefig("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                  str(int(cube_id)) + "_voronoi_spectra_stacked_ppxf.pdf")

        nax1.tick_params(labelsize=20)
        nax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=20)
        nax1.set_ylabel(r'\textbf{Relative Flux}', fontsize=20)
        n.tight_layout()
        n.savefig("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                  str(int(cube_id)) + "_voronoi_spectra_stacked_spectra.pdf")

        # Save each cube_ppxf_results into cube_results folder
        np.save(
            "cube_results/cube_" + str(cube_id) + "/cube_" + str(cube_id) +
            "_voronoi_ppxf_results.npy", cube_ppxf_results)

        # saving cube_lmfit_results into cube_results folder
        np.save(
            "cube_results/cube_" + str(cube_id) + "/cube_" + str(cube_id) +
            "_voronoi_lmfit_results.npy", cube_lmfit_results)

        voronoi_plotter(cube_id)
        rotation_curves(cube_id)
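
        # A minimal, hypothetical sketch (not part of the original routine)
        # showing how the arrays saved above could be reloaded; the column
        # layout follows the indexing used when the arrays were filled:
        # base = "cube_results/cube_" + str(cube_id) + "/cube_" + str(cube_id)
        # ppxf_res = np.load(base + "_voronoi_ppxf_results.npy")
        # lmfit_res = np.load(base + "_voronoi_lmfit_results.npy")
        # bin_vels = lmfit_res[:, 2]  # lmfit velocity for each Voronoi bin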
Example #8
def lmfit_uncertainty(cubes, runs):
    data = np.zeros([len(cubes), runs, 7])

    # 1st dimension: one array per cube
    # 2nd dimension: same number of rows as runs variable
    # 3rd dimension: columns to store data
    #   [0] : new signal value [= signal + perturbation]
    #   [1] : new sigma produced
    #   [2] : (sigma_best - sigma_new) / sigma_best
    #   [3] : new signal to noise value
    #   [4] : new signal to noise which has been scaled
    #   [5] : new velocity produced
    #   [6] : (vel_best - vel_new) / vel_best
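    #
    # e.g. data[0, 10, 2] would hold the fractional sigma error from the
    # eleventh run on the first cube in the list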

    # Looping over all of the provided cubes
    for i_cube in range(len(cubes)):
        cube_id = cubes[i_cube]

        # Pulling the parameters for the best fit
        bd = spectra_data.lmfit_data(cube_id)  # best data
        best_sigma = bd['sigma_gal']
        best_z = bd['z']

        c = 299792.458  # speed of light in kms^-1
        best_vel = c * np.log(1 + best_z)

        # Load (non-redshifted) wavelength and flux data
        x_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                         str(cube_id) + "_corr_x.npy")
        y_data = np.load("cube_results/cube_" + str(cube_id) + "/cube_" +
                         str(cube_id) + "_cps_y.npy")

        # Define the pre-doublet region, 3500Å to 3700Å, used to estimate noise
        dr_mask = ((x_data > 3500) & (x_data < 3700))

        x_masked = x_data[dr_mask]
        y_masked = y_data[dr_mask]

        # Standard deviation of the data in the region before the doublet
        data_std = np.std(y_masked)

        # Fake data created based on the best fitting parameters
        x_fake = np.linspace(3500, 3800, 600) * (1 + best_z)
        y_fake = spectra_data.f_doublet(x_fake, bd['c'], bd['i1'], bd['i2'],
                                        bd['sigma_gal'], bd['z'],
                                        bd['sigma_inst'])

        plt.figure()
        plt.plot(x_fake / (1 + best_z), y_fake)
        #plt.show()

        spectrum = y_fake

        # doublet region where signal lies
        drm = np.where(spectrum > bd['c'] * x_fake)
        dr_x_masked = x_fake[drm]
        dr_y_masked = spectrum[drm]

        # Looping over the number of runs specified
        for curr_loop in range(runs):
            print("working with " + str(cube_id) + " and index " +
                  str(curr_loop))
            # Perturb the fake flux data by adding an amount of Gaussian noise
            random_noise = np.random.normal(0, data_std, len(x_fake))

            if curr_loop >= int(3 / 4 * runs):
                random_noise = 10 * random_noise

            # perturb the clean model each run to avoid accumulating noise
            spectrum = y_fake + random_noise

            xf_dr = x_fake / (1 + best_z)

            # Want to attempt a refitting of the Gaussian doublet over the new data
            gauss_params = Parameters()
            gauss_params.add('c', value=bd['c'])
            gauss_params.add('i1', value=bd['i1'], min=0.0)
            gauss_params.add('r', value=1.3, min=0.5, max=1.5)
            gauss_params.add('i2', expr='i1/r', min=0.0)
            gauss_params.add('sigma_gal', value=bd['sigma_gal'])
            gauss_params.add('z', value=bd['z'])
            gauss_params.add('sigma_inst', value=bd['sigma_inst'], vary=False)

            gauss_model = Model(spectra_data.f_doublet)
            gauss_result = gauss_model.fit(spectrum,
                                           x=x_fake,
                                           params=gauss_params)

            new_best_fit = gauss_result.best_fit

            new_best_values = gauss_result.best_values
            new_best_sigma = new_best_values['sigma_gal']
            new_z = new_best_values['z']

            c = 299792.458  # speed of light in kms^-1
            new_best_vel = c * np.log(1 + new_z)

            sigma_ratio = np.abs(best_sigma - new_best_sigma) / best_sigma
            vel_ratio = np.abs(best_vel - new_best_vel) / best_vel

            # non-doublet region to calculate noise
            ndr_mask = ((xf_dr > 3500) & (xf_dr < 3700))
            new_x_masked = xf_dr[ndr_mask]
            new_y_masked = spectrum[ndr_mask]
            new_noise = np.std(new_y_masked)

            new_signal = np.max(spectrum)
            signal_len = len(
                spectrum[drm])  # size of signal to scale the noise
            #print(drm, spectrum[drm], signal_len)
            #print(np.max(spectrum), np.median(spectrum[drm]))

            new_sn_scaled = new_signal / (new_noise * np.sqrt(signal_len))
            new_sn = new_signal / (new_noise)
            #print(new_sn, new_signal, new_noise, np.sqrt(signal_len))
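            # the scaled S/N additionally divides by sqrt(signal_len), the
            # number of pixels found in the doublet signal region above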

            data[i_cube][curr_loop][0] = new_signal  # new signal
            data[i_cube][curr_loop][1] = new_best_sigma  # new doublet sigma
            data[i_cube][curr_loop][2] = np.abs(
                sigma_ratio)  # new fractional error sig
            data[i_cube][curr_loop][3] = new_sn  # new S/N
            data[i_cube][curr_loop][4] = new_sn_scaled  # new scaled S/N
            data[i_cube][curr_loop][5] = new_best_vel  # new velocity
            data[i_cube][curr_loop][6] = vel_ratio  # new fractional error vel

            plt.figure()
            plt.plot(xf_dr, y_fake, linewidth=0.5, color="#8bc34a")
            plt.plot(xf_dr, spectrum, linewidth=0.5, color="#000000")

            plt.plot(xf_dr, new_best_fit, linewidth=0.5, color="#d32f2f")

            plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=15)
            plt.ylabel(r'\textbf{Flux}', fontsize=15)

            plt.tight_layout()

            uncert_lmfit_dir = "uncert_lmfit/cube_" + str(cube_id)
            if not os.path.exists(uncert_lmfit_dir):
                os.mkdir(uncert_lmfit_dir)
            plt.savefig(uncert_lmfit_dir + "/cube_" + str(cube_id) + "_" +
                        str(curr_loop) + ".pdf")
            plt.close("all")

            np.save(
                "uncert_lmfit/cube_" + str(cube_id) + "/cube_" + str(cube_id) +
                "_lmfit_perts", data[i_cube])

    np.save("data/lmfit_uncert_data", data)
Example #9
def vel_stars_vs_vel_oii():
    data = np.load("data/ppxf_fitter_data.npy")

    fig, ax = plt.subplots()

    gq_val = []  # graph-quantifier value list
    chi_sq_list = []  # chi-squared list
    percent_diff_list = []  # percentage difference list

    unusable = np.array([849, 549, 1075, 895])  # from S/N and sigma cut off

    for i_d in range(len(data)):
        cc_d = data[i_d, 0]  # current cube data
        cube_id = int(cc_d[0])

        if cube_id not in unusable:
            cc_sn = cc_d[7]  # current S/N for the cube

            lmfit_vals = spectra_data.lmfit_data(cube_id)
            cube_z = lmfit_vals['z']
            cube_z_err = lmfit_vals['err_z']

            vel_oii = oii_velocity(cube_z)  # OII from lmfit fitting
            vel_oii_err = oii_velocity(lmfit_vals['err_z'])
            #vel_oii = 0 # the velocities should be 0 as they have not been redshifted

            vel_ppxf = cc_d[14]  # stellar fitting from pPXF
            vel_ppxf_err = cc_d[15]

            # loading "a" factors in a/x model
            a_ppxf = np.load("uncert_ppxf/vel_curve_best_values_ppxf.npy")
            a_lmfit = np.load("uncert_lmfit/vel_curve_best_values_lmfit.npy")

            # fractional error
            frac_err_ppxf = (a_ppxf / cc_sn) * vel_ppxf
            frac_err_lmfit = (a_lmfit / cc_sn) * vel_oii
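            # e.g. with a ~ 2 and S/N ~ 20 (illustrative numbers only),
            # a/(S/N) = 0.1, i.e. an error of 10 per cent of the velocity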

            gq_val.append(vel_ppxf / vel_oii)

            csq = chisq(vel_oii, frac_err_lmfit, vel_ppxf)
            chi_sq_list.append(csq)

            pd = (vel_ppxf - vel_oii) / vel_ppxf
            percent_diff_list.append(pd)

            ax.errorbar(vel_ppxf,
                        vel_oii,
                        yerr=frac_err_lmfit,
                        xerr=frac_err_ppxf,
                        color="#000000",
                        fmt="o",
                        ms=6,
                        elinewidth=1.0,
                        capsize=5,
                        capthick=1.0,
                        zorder=0)

        #ax.annotate(cube_id, (vel_oii, vel_ppxf))

    av_percent_diff = np.average(
        percent_diff_list) * 100  # average percentage difference
    print("vel_stars_vs_vel_oii, percentage difference: " +
          str(av_percent_diff))

    chi_sq = np.sum(chi_sq_list)
    red_chi_sq = chi_sq / len(chi_sq_list)
    print("vel_stars_vs_vel_oii, red_chi_sq: " + str(red_chi_sq))

    ax.tick_params(labelsize=18)
    ax.set_xlabel(r'\textbf{V$_{*}$ (km s$^{-1}$)}', fontsize=20)
    ax.set_ylabel(r'\textbf{V$_{OII}$ (km s$^{-1}$)}', fontsize=20)

    ax.set_xlim([75_000, 175_000])
    ax.set_ylim([75_000, 175_000])

    # plot 1:1 line
    f_xd = np.linspace(0, 275000, 275000)
    ax.plot(f_xd, f_xd, lw=1.5, color="#000000", alpha=0.3)

    #ax.annotate("median y/x val: "+str(np.median(gq_val)), (90_000,260_000))

    fig.tight_layout()
    fig.savefig("graphs/vel_star_vs_vel_oii.pdf", bbox_inches="tight")
    plt.close("all")
Example #10
def fitting_plotter(cube_id):
    # defining wavelength as the x-axis
    x_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                     str(int(cube_id)) + "_lamgal.npy")

    # defining the flux from the data and model
    y_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                     str(int(cube_id)) + "_flux.npy")
    y_model = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                      str(int(cube_id)) + "_model.npy")

    # scaled down y data
    y_data_scaled = y_data / np.median(y_data)

    # opening cube to obtain the segmentation data
    cube_file = (
        "/Volumes/Jacky_Cao/University/level4/project/cubes_better/cube_" +
        str(cube_id) + ".fits")
    hdu = fits.open(cube_file)
    segmentation_data = hdu[2].data
    seg_loc_rows, seg_loc_cols = np.where(segmentation_data == cube_id)
    signal_pixels = len(seg_loc_rows)

    # the noise spectrum will be used in the chi-squared calculation
    noise = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                    str(int(cube_id)) + "_noise.npy")
    noise_median = np.median(noise)
    noise_stddev = np.std(noise)

    residual = y_data_scaled - y_model
    res_median = np.median(residual)
    res_stddev = np.std(residual)

    mask = ((residual < res_stddev) & (residual > -res_stddev))

    chi_sq = (y_data_scaled[mask] - y_model[mask])**2 / noise[mask]**2
    total_chi_sq = np.sum(chi_sq)

    total_points = len(chi_sq)
    reduced_chi_sq = total_chi_sq / total_points
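    # note: this divides by the number of points only; degrees of freedom
    # from the fitted parameters are not subtracted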

    print("Cube " + str(cube_id) + " has a reduced chi-squared of " +
          str(reduced_chi_sq))

    # spectral lines
    sl = spectra_data.spectral_lines()

    # parameters from lmfit
    lm_params = spectra_data.lmfit_data(cube_id)
    c = lm_params['c']
    i1 = lm_params['i1']
    i2 = lm_params['i2']
    sigma_gal = lm_params['sigma_gal']
    z = lm_params['z']
    sigma_inst = lm_params['sigma_inst']

    plt.figure()

    plt.plot(x_data, y_data_scaled, linewidth=1.1, color="#000000")
    plt.plot(x_data,
             y_data_scaled + noise_stddev,
             linewidth=0.1,
             c="#616161",
             alpha=0.1)
    plt.plot(x_data,
             y_data_scaled - noise_stddev,
             linewidth=0.1,
             c="#616161",
             alpha=0.1)

    # plotting over the OII doublet
    doublets = np.array([3727.092, 3729.875]) * (1 + z)
    dblt_av = np.average(doublets)

    dblt_x_mask = ((x_data > dblt_av - 20) & (x_data < dblt_av + 20))
    doublet_x_data = x_data[dblt_x_mask]
    doublet_data = spectra_data.f_doublet(doublet_x_data, c, i1, i2, sigma_gal,
                                          z, sigma_inst)
    doublet_data = doublet_data / np.median(y_data)
    plt.plot(doublet_x_data, doublet_data, linewidth=0.5, color="#9c27b0")

    max_y = np.max(y_data_scaled)
    # plotting spectral lines
    for e_key, e_val in sl['emis'].items():
        spec_line = float(e_val) * (1 + z)
        spec_label = e_key

        if np.any(np.isclose(float(e_val), doublets / (1 + z))):
            alpha_line = 0.2
        else:
            alpha_line = 0.7

        alpha_text = 0.75

        plt.axvline(x=spec_line,
                    linewidth=0.5,
                    color="#1e88e5",
                    alpha=alpha_line)
        plt.text(spec_line - 3,
                 max_y,
                 spec_label,
                 rotation=-90,
                 alpha=alpha_text,
                 weight="bold",
                 fontsize=15)

    for e_key, e_val in sl['abs'].items():
        spec_line = float(e_val) * (1 + z)
        spec_label = e_key

        plt.axvline(x=spec_line, linewidth=0.5, color="#ff8f00", alpha=0.7)
        plt.text(spec_line - 3,
                 max_y,
                 spec_label,
                 rotation=-90,
                 alpha=0.75,
                 weight="bold",
                 fontsize=15)

    # iron spectral lines
    for e_key, e_val in sl['iron'].items():
        spec_line = float(e_val) * (1 + z)

        plt.axvline(x=spec_line, linewidth=0.5, color="#bdbdbd", alpha=0.3)

    plt.plot(x_data, y_model, linewidth=1.5, color="#b71c1c")

    residuals_mask = (residual > res_stddev)
    rmask = residuals_mask

    #plt.scatter(x_data[rmask], residual[rmask], s=3, color="#f44336", alpha=0.5)
    plt.scatter(x_data[mask], residual[mask] - 1, s=3, color="#43a047")

    plt.tick_params(labelsize=15)
    plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=15)
    plt.ylabel(r'\textbf{Relative Flux}', fontsize=15)
    plt.tight_layout()
    plt.savefig("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                str(int(cube_id)) + "_fitted.pdf")

    plt.close("all")

    return {'chi2': total_chi_sq, 'redchi2': reduced_chi_sq}
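
# Hypothetical usage sketch (the cube IDs below are illustrative only):
# for cid in [23, 271]:
#     stats = fitting_plotter(cid)
#     print(cid, stats['redchi2'])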
Example #11
def diag_results(cube_id):
    def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
        """ function for Gaussian doublet """
        dblt_mu = [3727.092, 3729.875]  # the actual non-redshifted wavelengths
        l1 = dblt_mu[0] * (1 + z)
        l2 = dblt_mu[1] * (1 + z)

        sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)

        norm = (sigma * np.sqrt(2 * np.pi))
        term1 = (i1 / norm) * np.exp(-(x - l1)**2 / (2 * sigma**2))
        term2 = (i2 / norm) * np.exp(-(x - l2)**2 / (2 * sigma**2))
        return (c * x + term1 + term2)
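        # e.g. f_doublet(x, 0, 1, 1, 1, 0, 1) peaks near the rest-frame
        # doublet wavelengths (z = 0) with combined sigma sqrt(2)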

    with PdfPages('diagnostics/cube_' + str(cube_id) +
                  '_diagnostic.pdf') as pdf:
        analysis = cube_reader.analysis(
            "/Volumes/Jacky_Cao/University/level4/" +
            "project/cubes_better/cube_" + str(cube_id) + ".fits",
            "data/skyvariance_csub.fits")

        # calling data into variables
        icd = analysis['image_data']

        segd = analysis['spectra_data']['segmentation']

        sr = analysis['sr']
        df_data = analysis['df_data']
        gs_data = analysis['gs_data']
        snw_data = analysis['snw_data']

        # images of the galaxy
        f, (ax1, ax2) = plt.subplots(1, 2)
        ax1.imshow(icd['median'], cmap='gray_r')
        ax1.set_title(r'\textbf{Galaxy Image: Median}', fontsize=13)
        ax1.set_xlabel(r'\textbf{Pixels}', fontsize=13)
        ax1.set_ylabel(r'\textbf{Pixels}', fontsize=13)

        ax2.imshow(icd['sum'], cmap='gray_r')
        ax2.set_title(r'\textbf{Galaxy Image: Sum}', fontsize=13)
        ax2.set_xlabel(r'\textbf{Pixels}', fontsize=13)
        ax2.set_ylabel(r'\textbf{Pixels}', fontsize=13)
        f.subplots_adjust(wspace=0.4)

        pdf.savefig()
        plt.close()

        # ---------------------------------------------------------------------- #

        # segmentation area used to extract the 1D spectra
        segd_mask = ((segd == cube_id))

        plt.figure()
        plt.title(r'\textbf{Segmentation area used to extract 1D spectra}',
                  fontsize=13)
        plt.imshow(np.rot90(segd_mask, 1), cmap='Paired')
        plt.xlabel(r'\textbf{Pixels}', fontsize=13)
        plt.ylabel(r'\textbf{Pixels}', fontsize=13)
        pdf.savefig()
        plt.close()

        # ---------------------------------------------------------------------- #

        # spectra plotting
        f, (ax1, ax2) = plt.subplots(2, 1)
        # --- redshifted data plotting
        cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps'])

        ## plotting our cube data
        cbs_y = gs_data['gd_shifted']
        ax1.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000")

        ## plotting our sky noise data
        snd_y = snw_data['sky_regions'][:, 1]
        ax1.plot(cbd_x, snd_y, linewidth=0.5, color="#f44336", alpha=0.5)

        ## plotting our [OII] region
        ot_x = df_data['x_region']
        ot_y = df_data['y_region']
        ax1.plot(ot_x, ot_y, linewidth=0.5, color="#00c853")

        ## plotting the standard deviation region in the [OII] section
        std_x = df_data['std_x']
        std_y = df_data['std_y']
        ax1.plot(std_x, std_y, linewidth=0.5, color="#00acc1")

        pu_lines = gs_data['pu_peaks']
        for i in range(len(pu_lines)):
            ax1.axvline(x=pu_lines[i],
                        linewidth=0.5,
                        color="#ec407a",
                        alpha=0.2)

        ax1.set_title(r'\textbf{Spectra: cross-section redshifted}',
                      fontsize=13)
        ax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
        ax1.set_ylabel(r'\textbf{Flux}', fontsize=13)
        ax1.set_ylim([-1000, 5000])  # setting manual limits for now

        # --- corrected redshift
        crs_x = np.linspace(sr['begin'], sr['end'], sr['steps'])
        rdst = gs_data['redshift']

        sp_lines = gs_data['spectra']

        ## corrected wavelengths
        corr_x = crs_x / (1 + rdst)

        ## plotting our cube data
        cps_y = gs_data['gd_shifted']
        ax2.plot(corr_x, cps_y, linewidth=0.5, color="#000000")

        ## plotting our sky noise data
        sn_y = gs_data['sky_noise']
        ax2.plot(corr_x, sn_y, linewidth=0.5, color="#e53935")

        ## plotting spectra lines
        for e_key, e_val in sp_lines['emis'].items():
            spec_line = float(e_val)
            ax2.axvline(x=spec_line, linewidth=0.5, color="#00c853")
            ax2.text(spec_line - 10, 4800, e_key, rotation=-90)

        ax2.set_title(r'\textbf{Spectra: cross-section corrected}',
                      fontsize=13)
        ax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
        ax2.set_ylabel(r'\textbf{Flux}', fontsize=13)
        ax2.set_ylim([-500, 5000])  # setting manual limits for now

        f.subplots_adjust(hspace=0.5)
        pdf.savefig()
        plt.close()

        # ---------------------------------------------------------------------- #

        # OII doublet region
        ot_fig = plt.figure()
        # plotting the data for the cutout [OII] region
        ot_x = df_data['x_region']
        ot_y = df_data['y_region']
        plt.plot(ot_x, ot_y, linewidth=0.5, color="#000000")

        ## plotting the standard deviation region in the [OII] section
        std_x = df_data['std_x']
        std_y = df_data['std_y']
        plt.plot(std_x, std_y, linewidth=0.5, color="#00acc1")

        dblt_rng = df_data['doublet_range']
        ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1]
        x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000)

        # lmfit
        lm_init = df_data['lm_init_fit']
        lm_best = df_data['lm_best_fit']

        plt.plot(ot_x, lm_best, linewidth=0.5, color="#1e88e5")
        plt.plot(ot_x, lm_init, linewidth=0.5, color="#43a047", alpha=0.5)

        lm_params = df_data['lm_best_param']
        lm_params = [prm_value for prm_key, prm_value in lm_params.items()]
        c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params

        dblt_mu = [3727.092,
                   3729.875]  # the actual non-redshifted wavelengths for OII
        l1 = dblt_mu[0] * (1 + rdsh)
        l2 = dblt_mu[1] * (1 + rdsh)

        sig = np.sqrt(sig_g**2 + sig_i**2)
        norm = (sig * np.sqrt(2 * np.pi))

        lm_y1 = c + (i_val1 / norm) * np.exp(-(ot_x - l1)**2 / (2 * sig**2))
        lm_y2 = c + (i_val2 / norm) * np.exp(-(ot_x - l2)**2 / (2 * sig**2))

        plt.plot(ot_x, lm_y1, linewidth=0.5, color="#e64a19", alpha=0.7)
        plt.plot(ot_x, lm_y2, linewidth=0.5, color="#1a237e", alpha=0.7)

        # signal-to-noise straight line and gaussian (loaded but not plotted here)
        sn_line = df_data['sn_line']
        sn_gauss = df_data['sn_gauss']

        plt.title(r'\textbf{OII doublet region}', fontsize=13)
        plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
        plt.ylabel(r'\textbf{Flux}', fontsize=13)
        plt.ylim([-500, 5000])  # setting manual limits for now
        pdf.savefig()
        plt.close()

        # ---------------------------------------------------------------------- #

        # plotting pPXF data
        # defining wavelength as the x-axis
        x_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                         str(int(cube_id)) + "_lamgal.npy")

        # defining the flux from the data and model
        y_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                         str(int(cube_id)) + "_flux.npy")
        y_model = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                          str(int(cube_id)) + "_model.npy")

        # scaled down y data
        y_data_scaled = y_data / np.median(y_data)

        # opening cube to obtain the segmentation data
        cube_file = (
            "/Volumes/Jacky_Cao/University/level4/project/cubes_better/cube_" +
            str(cube_id) + ".fits")
        hdu = fits.open(cube_file)
        segmentation_data = hdu[2].data
        seg_loc_rows, seg_loc_cols = np.where(segmentation_data == cube_id)
        signal_pixels = len(seg_loc_rows)

        # the noise spectrum will be used in the chi-squared calculation
        noise = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" +
                        str(int(cube_id)) + "_noise.npy")
        noise_median = np.median(noise)
        noise_stddev = np.std(noise)

        residual = y_data_scaled - y_model
        res_median = np.median(residual)
        res_stddev = np.std(residual)

        mask = ((residual < res_stddev) & (residual > -res_stddev))

        chi_sq = (y_data_scaled[mask] - y_model[mask])**2 / noise[mask]**2
        total_chi_sq = np.sum(chi_sq)

        total_points = len(chi_sq)
        reduced_chi_sq = total_chi_sq / total_points

        # spectral lines
        sl = spectra_data.spectral_lines()

        # parameters from lmfit
        lm_params = spectra_data.lmfit_data(cube_id)
        c = lm_params['c']
        i1 = lm_params['i1']
        i2 = lm_params['i2']
        sigma_gal = lm_params['sigma_gal']
        z = lm_params['z']
        sigma_inst = lm_params['sigma_inst']

        plt.figure()

        plt.plot(x_data, y_data_scaled, linewidth=1.1, color="#000000")
        plt.plot(x_data,
                 y_data_scaled + noise_stddev,
                 linewidth=0.1,
                 color="#616161",
                 alpha=0.1)
        plt.plot(x_data,
                 y_data_scaled - noise_stddev,
                 linewidth=0.1,
                 color="#616161",
                 alpha=0.1)

        # plotting over the OII doublet
        doublets = np.array([3727.092, 3729.875])
        #dblt_av = np.average(doublets) * (1+z)
        dblt_av = np.average(doublets)

        dblt_x_mask = ((x_data > dblt_av - 20) & (x_data < dblt_av + 20))
        doublet_x_data = x_data[dblt_x_mask]
        doublet_data = f_doublet(doublet_x_data, c, i1, i2, sigma_gal, z,
                                 sigma_inst)
        doublet_data = doublet_data / np.median(y_data)
        plt.plot(doublet_x_data, doublet_data, linewidth=0.5, color="#9c27b0")

        max_y = np.max(y_data_scaled)
        # plotting spectral lines
        for e_key, e_val in sl['emis'].items():
            spec_line = float(e_val)
            #spec_line = float(e_val) * (1+z)
            spec_label = e_key

            if (e_val in str(doublets)):
                alpha_line = 0.2
            else:
                alpha_line = 0.7

            alpha_text = 0.75

            plt.axvline(x=spec_line,
                        linewidth=0.5,
                        color="#1e88e5",
                        alpha=alpha_line)
            plt.text(spec_line - 3,
                     max_y,
                     spec_label,
                     rotation=-90,
                     alpha=alpha_text,
                     weight="bold",
                     fontsize=15)

        for e_key, e_val in sl['abs'].items():
            spec_line = float(e_val)
            #spec_line = float(e_val) * (1+z)
            spec_label = e_key

            plt.axvline(x=spec_line, linewidth=0.5, color="#ff8f00", alpha=0.7)
            plt.text(spec_line - 3,
                     max_y,
                     spec_label,
                     rotation=-90,
                     alpha=0.75,
                     weight="bold",
                     fontsize=15)

        # iron spectral lines
        for e_key, e_val in sl['iron'].items():
            spec_line = float(e_val)
            #spec_line = float(e_val) * (1+z)

            plt.axvline(x=spec_line, linewidth=0.5, color="#bdbdbd", alpha=0.3)

        plt.plot(x_data, y_model, linewidth=1.5, color="#b71c1c")

        residuals_mask = (residual > res_stddev)
        rmask = residuals_mask

        #plt.scatter(x_data[rmask], residual[rmask], s=3, color="#f44336", alpha=0.5)
        plt.scatter(x_data[mask], residual[mask] - 1, s=3, color="#43a047")

        plt.tick_params(labelsize=13)
        plt.title(r'\textbf{Spectra with pPXF overlayed}', fontsize=13)
        plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
        plt.ylabel(r'\textbf{Relative Flux}', fontsize=13)
        plt.tight_layout()
        pdf.savefig()
        plt.close()

        # ---------------------------------------------------------------------- #

        # Voigt fitted region
        # Running pPXF fitting routine
        best_fit = ppxf_fitter_kinematics_sdss.kinematics_sdss(
            cube_id, 0, "all")
        best_fit_vars = best_fit['variables']

        data_wl = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" +
                          str(int(cube_id)) + "_cbd_x.npy")  # 'x-data'
        data_spec = np.load("cube_results/cube_" + str(int(cube_id)) +
                            "/cube_" + str(int(cube_id)) +
                            "_cbs_y.npy")  # 'y-data'

        # y-data which was scaled down by its median during the pPXF run
        galaxy = best_fit['y_data']

        model_wl = np.load("ppxf_results/cube_" + str(int(cube_id)) +
                           "/cube_" + str(int(cube_id)) + "_lamgal.npy")
        model_spec = np.load("ppxf_results/cube_" + str(int(cube_id)) +
                             "/cube_" + str(int(cube_id)) + "_model.npy")

        # parameters from lmfit
        lm_params = spectra_data.lmfit_data(cube_id)
        z = lm_params['z']
        sigma_inst = lm_params['sigma_inst']

        # masking out the region of CaH and CaK
        calc_rgn = np.array([3900, 4000])

        data_rgn = calc_rgn * (1 + z)
        data_mask = ((data_wl > data_rgn[0]) & (data_wl < data_rgn[1]))
        data_wl_masked = data_wl[data_mask]
        data_spec_masked = data_spec[data_mask]

        data_spec_masked = data_spec_masked / np.median(data_spec_masked)

        model_rgn = calc_rgn
        model_mask = ((model_wl > calc_rgn[0]) & (model_wl < calc_rgn[1]))
        model_wl_masked = model_wl[model_mask]
        model_spec_masked = model_spec[model_mask]

        z_wl_masked = model_wl_masked * (1 + z)  # redshifted wavelength range
        galaxy_masked = galaxy[model_mask]

        # Applying the lmfit routine to fit two Voigt profiles over our spectra data
        vgt_pars = Parameters()
        vgt_pars.add('sigma_inst', value=sigma_inst, vary=False)
        vgt_pars.add('sigma_gal', value=1.0, min=0.0)

        vgt_pars.add('z', value=z)

        vgt_pars.add('v1_amplitude', value=-0.1, max=0.0)
        vgt_pars.add('v1_center', expr='3934.777*(1+z)')
        vgt_pars.add('v1_sigma',
                     expr='sqrt(sigma_inst**2 + sigma_gal**2)',
                     min=0.0)
        #vgt_pars.add('v1_gamma', value=0.01)

        vgt_pars.add('v2_amplitude', value=-0.1, max=0.0)
        vgt_pars.add('v2_center', expr='3969.588*(1+z)')
        vgt_pars.add('v2_sigma', expr='v1_sigma')
        #vgt_pars.add('v2_gamma', value=0.01)

        vgt_pars.add('c', value=0)

        voigt = VoigtModel(prefix='v1_') + VoigtModel(
            prefix='v2_') + ConstantModel()
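        # the prefixes give each Voigt component its own parameter namespace
        # (v1_*, v2_*); the expr constraints above tie both centres to the
        # fitted redshift and force the two components to share one sigma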

        vgt_result = voigt.fit(galaxy_masked, x=z_wl_masked, params=vgt_pars)

        opt_pars = vgt_result.best_values
        best_fit = vgt_result.best_fit

        # Plotting the spectra
        fig, ax = plt.subplots()
        ax.plot(z_wl_masked, galaxy_masked, lw=1.5, c="#000000", alpha=0.3)
        ax.plot(z_wl_masked, model_spec_masked, lw=1.5, c="#00c853")
        ax.plot(z_wl_masked, best_fit, lw=1.5, c="#e53935")

        ax.tick_params(labelsize=13)
        ax.set_ylabel(r'\textbf{Relative Flux}', fontsize=13)
        ax.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)

        plt.title(r'\textbf{Voigt Fitted Region}', fontsize=15)
        fig.tight_layout()
        pdf.savefig()
        plt.close()

        # ---------------------------------------------------------------------- #

        # Values for diagnostics
        catalogue = np.load("data/matched_catalogue.npy")
        cat_loc = np.where(catalogue[:, 0] == cube_id)[0]

        cube_data = catalogue[cat_loc][0]
        vmag = cube_data[5]

        sigma_sn_data = np.load("data/ppxf_fitter_data.npy")
        sigma_sn_loc = np.where(sigma_sn_data[:, 0, 0] == cube_id)[0]

        ss_indiv_data = sigma_sn_data[sigma_sn_loc][0][0]
        ssid = ss_indiv_data

        plt.figure()
        plt.title('Variables and numbers for cube ' + str(cube_id),
                  fontsize=15)
        plt.text(0.0, 0.9, "HST V-band magnitude: " + str(vmag))
        plt.text(0.0, 0.85, "S/N from spectra: " + str(ssid[7]))

        plt.text(0.0, 0.75, "OII sigma lmfit: " + str(ssid[1]))
        plt.text(0.0, 0.7, "OII sigma pPXF: " + str(ssid[5]))

        plt.text(0.0, 0.6, "Voigt sigma lmfit: " + str(ssid[11]))
        plt.text(0.0, 0.55, "Voigt sigma pPXF: " + str(ssid[10]))

        plt.axis('off')
        pdf.savefig()
        plt.close()

        # We can also set the file's metadata via the PdfPages object:
        d = pdf.infodict()
        d['Title'] = 'cube_' + str(cube_id) + ' diagnostics'
        d['Author'] = u'Jacky Cao'
        #d['Subject'] = 'How to create a multipage pdf file and set its metadata'
        #d['Keywords'] = 'PdfPages multipage keywords author title subject'
        #d['CreationDate'] = datetime.datetime(2009, 11, 13)
        d['CreationDate'] = datetime.datetime.today()
Example #12
        def singular_plot():
            # parameters from lmfit
            lm_params = spectra_data.lmfit_data(cube_id)
            c = lm_params['c']
            i1 = lm_params['i1']
            i2 = lm_params['i2']
            sigma_gal = lm_params['sigma_gal']
            z = lm_params['z']
            sigma_inst = lm_params['sigma_inst']

            f, (ax1, ax2, ax3) = plt.subplots(
                1, 3, gridspec_kw={'width_ratios': [1, 1, 3]}, figsize=(8, 2))

            g, (gax1, gax2, gax3) = plt.subplots(
                1, 3, gridspec_kw={'width_ratios': [1, 2, 2]}, figsize=(12, 4))

            #ax1.set_title(r'\textbf{-}')
            ax1.axis('off')
            ax1.text(0.0, 0.9, "cube\_" + str(cube_id), fontsize=13)
            ax1.text(0.0, 0.7, "sigma\_star (km/s): " + 
                    str("{:.1f}".format(sigma_stars)), fontsize=13)
            ax1.text(0.0, 0.55, "sigma\_OII (km/s): " + 
                    str("{:.1f}".format(vel_dispersion)), fontsize=13)

            gax1.axis('off')
            gax1.text(0.0, 0.9, "cube\_" + str(cube_id), fontsize=13)
            gax1.text(0.0, 0.8, "sigma\_star (km/s): " + 
                    str("{:.1f}".format(sigma_stars)), fontsize=13)
            gax1.text(0.0, 0.75, "sigma\_OII (km/s): " + 
                    str("{:.1f}".format(vel_dispersion)), fontsize=13)

            gax1.text(0.0, 0.65, "OII fit outputs: ", fontsize=13)
            gax1.text(0.0, 0.6, "sigma\_gal: " + 
                    str("{:.5f}".format(sigma_gal)), fontsize=13)
            gax1.text(0.0, 0.55, "sigma\_inst: " + 
                    str("{:.5f}".format(sigma_inst)), fontsize=13)


            ax2.set_title(r'\textbf{MUSE}')
            ax2.axis('off') 
            fits_file = ("/Volumes/Jacky_Cao/University/level4/project/" +
                    "cubes_better/cube_" + str(cube_id) + ".fits")
            im_coll_data = cube_reader.image_collapser(fits_file)
            ax2.imshow(im_coll_data['median'], cmap='gray_r')

            # plotting pPXF data
            # defining wavelength as the x-axis
            x_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + 
                    "/cube_" + str(int(cube_id)) + "_lamgal.npy")

            # defining the flux from the data and model
            y_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + 
                    "/cube_" + str(int(cube_id)) + "_flux.npy")
            y_model = np.load("ppxf_results/cube_" + str(int(cube_id)) + 
                    "/cube_" + str(int(cube_id)) + "_model.npy")

            # scaled down y data 
            y_data_scaled = y_data/np.median(y_data) 

            # spectral lines
            sl = spectra_data.spectral_lines() 

            ax3.plot(x_data, y_data_scaled, linewidth=0.7, color="#000000")

            gax2.plot(x_data, y_data_scaled, linewidth=0.7, color="#000000")

            max_y = np.max(y_data_scaled)
            # plotting spectral lines
            for e_key, e_val in sl['emis'].items():
                spec_line = float(e_val)*(1+z)
                spec_label = e_key

                alpha_line = 0.7                            
                alpha_text = 0.75

                ax3.axvline(x=spec_line, linewidth=0.5, color="#1e88e5", 
                        alpha=alpha_line) 
                gax2.axvline(x=spec_line, linewidth=0.5, color="#1e88e5", 
                        alpha=alpha_line)
                gax3.axvline(x=spec_line, linewidth=0.5, color="#1e88e5", 
                        alpha=alpha_line)

            for e_key, e_val in sl['abs'].items():
                spec_line = float(e_val)*(1+z)
                spec_label = e_key

                ax3.axvline(x=spec_line, linewidth=0.5, color="#ff8f00", 
                        alpha=0.7)
                gax2.axvline(x=spec_line, linewidth=0.5, color="#ff8f00", 
                        alpha=0.7)
                gax3.axvline(x=spec_line, linewidth=0.5, color="#1e88e5", 
                        alpha=alpha_line)

            # iron spectral lines
            for e_key, e_val in sl['iron'].items(): 
                spec_line = float(e_val)*(1+z)

                ax3.axvline(x=spec_line, linewidth=0.5, color="#bdbdbd", 
                        alpha=0.3)
                gax2.axvline(x=spec_line, linewidth=0.5, color="#bdbdbd", 
                        alpha=0.3)

            ax3.plot(x_data, y_model, linewidth=0.7, color="#b71c1c")

            ax3.tick_params(labelsize=13)
            ax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
            ax3.set_ylabel(r'\textbf{Relative Flux}', fontsize=13)
           
            ax3.set_xlim([3500*(1+z), 4000*(1+z)]) # 3500Å to 4000Å

            gax2.plot(x_data, y_model, linewidth=0.7, color="#b71c1c") 

            xd_range = [np.min(x_data), np.max(x_data)]
            xd = np.linspace(xd_range[0], xd_range[1], 4000)

            # Plotting OII doublet 
            doublet_data = spectra_data.f_doublet(xd*(1+z), c, i1, i2, sigma_gal, z, 
                    sigma_inst)
            ppxf_no_scale = np.load("ppxf_results/cube_" + str(int(cube_id)) + 
                    "/cube_" + str(int(cube_id)) + "_not_scaled.npy")
            gax2.plot(xd, doublet_data/np.median(ppxf_no_scale), lw=0.5, 
                    color="#7b1fa2")

            # Plotting individual Gaussians which make up the doublet
            dblt_mu = [3727.092, 3729.875] # non-redshifted wavelengths for OII
            l1 = dblt_mu[0] * (1+z)
            l2 = dblt_mu[1] * (1+z)
            
            sig = np.sqrt(sigma_gal**2 + sigma_inst**2) 
            norm = (sig*np.sqrt(2*np.pi))

            x_dat = xd * (1+z)

            lm_y1 = c + ( i1 / norm ) * np.exp(-(x_dat-l1)**2/(2*sig**2))
            lm_y2 = c + ( i2 / norm ) * np.exp(-(x_dat-l2)**2/(2*sig**2))

            gax2.plot(xd, lm_y1/np.median(ppxf_no_scale), linewidth=0.5, 
                    color="#8bc34a", alpha=0.7) 
            gax2.plot(xd, lm_y2/np.median(ppxf_no_scale), linewidth=0.5, 
                    color="#1e88e5", alpha=0.7)

            gax2.tick_params(labelsize=13)
            gax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
            gax2.set_ylabel(r'\textbf{Relative Flux}', fontsize=13)
           
            gax2.set_xlim([3700*(1+z), 4000*(1+z)]) # 3700Å to 4000Å

            # Zoomed in plot on the doublet region
            gax3.plot(x_data, y_data_scaled, linewidth=0.7, color="#000000")
            gax3.plot(xd, doublet_data/np.median(ppxf_no_scale), lw=0.5, 
                    color="#7b1fa2")

            gax3.plot(xd, lm_y1/np.median(ppxf_no_scale), linewidth=0.5, 
                    color="#8bc34a", alpha=0.7) 
            gax3.plot(xd, lm_y2/np.median(ppxf_no_scale), linewidth=0.5, 
                    color="#1e88e5", alpha=0.7)

            gax3.tick_params(labelsize=13)
            gax3.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
            gax3.set_ylabel(r'\textbf{Relative Flux}', fontsize=13)
           
            gax3.set_xlim([3700*(1+z), 3750*(1+z)]) # 3700Å to 3750Å

            f.tight_layout()
            f.savefig("diagnostics/single_page/"+str(int(i_cube))+"_cube_"+
                    str(cube_id)+".pdf")

            g.tight_layout()
            g.savefig("diagnostics/single_page_spectra/"+str(int(i_cube))+
                    "_cube_"+str(cube_id)+"_spectra.pdf")

            plt.close("all")