Code example #1
from astropy.cosmology import LambdaCDM

def lumdis(z, key):
    Ho = cosmology[key].H     # Hubble constant
    mo = cosmology[key].Om    # matter density
    deo = cosmology[key].Ode  # dark-energy density
    cos = LambdaCDM(H0=Ho, Om0=mo, Ode0=deo)
    return cos.luminosity_distance(z)
Code example #2
def lumdis(z, key):
    Ho = cosmology[key].H   # Hubble constant
    mo = cosmology[key].Om  # matter density
    l = cosmology[key].Ol   # dark-energy density
    k = cosmology[key].Ok   # curvature density
    # LambdaCDM accepts no Ok0 argument; curvature is derived as 1 - Om0 - Ode0
    cos = LambdaCDM(H0=Ho, Om0=mo, Ode0=l)
    return cos.luminosity_distance(z)
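As the comment in the example above notes, passing Ok0 to LambdaCDM fails: the constructor has no Ok0 parameter, and the curvature density is a derived property (with the default Tcmb0 = 0, radiation is neglected). A minimal check of this behavior, assuming astropy is installed:

from astropy.cosmology import LambdaCDM

cos = LambdaCDM(H0=70, Om0=0.3, Ode0=0.6)
print(cos.Ok0)  # ~0.1, derived as 1 - Om0 - Ode0 rather than passed in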
Code example #3
def calc_absmag(rmag, galZ, gmag, imag, h, O_matter, O_lambda):

    # Calculating the distance modulus
    cosmo = LambdaCDM(H0=h * 100, Om0=O_matter, Ode0=O_lambda)
    galDl = (cosmo.luminosity_distance(galZ).to('pc')).value
    DM = 5. * np.log10(galDl / 10.)

    # Calculating the K-corrections
    kcorfile = np.loadtxt('kcorrection_list.txt').T
    zbins = kcorfile[5]
    kparams = kcorfile[6:9]

    # Calculating the K-correction per redshift bin
    galKcor = np.zeros(len(galZ))
    for k in range(len(zbins)):

        zmask = (zbins[k] - 0.005 <= galZ) & (galZ < zbins[k] + 0.005)

        a, b, c = kparams[:, k]
        Kcor = a * (gmag - imag)**2 + b * (gmag - imag) + c

        galKcor[zmask] = Kcor[zmask]

    # Calculating the absolute magnitude
    rmag_abs = rmag - DM + galKcor

    return rmag_abs
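The distance-modulus arithmetic above, DM = 5 log10(d_L / 10 pc), is also exposed directly by astropy as distmod; a minimal equivalence check under the same cosmology (the redshift 0.25 is illustrative):

from astropy.cosmology import LambdaCDM
import numpy as np

cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
galDl = cosmo.luminosity_distance(0.25).to('pc').value
print(5. * np.log10(galDl / 10.))  # manual distance modulus, as in calc_absmag
print(cosmo.distmod(0.25).value)   # the same value from astropy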
Code example #4
halos = halos[mregion]

# ----------------------
# MICE PARAMETERS
# ----------------------

z_h = halos.z_v

mg_h = halos.g_des_true - 0.8 * (np.arctan(1.5 * z_h) - 0.1489)
mr_h = halos.r_des_true - 0.8 * (np.arctan(1.5 * z_h) - 0.1489)
mi_h = halos.i_des_true - 0.8 * (np.arctan(1.5 * z_h) - 0.1489)
mz_h = halos.z_des_true - 0.8 * (np.arctan(1.5 * z_h) - 0.1489)

lMH = np.log10(10**(halos.log_m) * 0.7)

Dl_h = np.array(cosmo.luminosity_distance(z_h).value) * 1.e6

Mg_h = mg_h + 5.0 - 5.0 * np.log10(Dl_h)
Mr_h = mr_h + 5.0 - 5.0 * np.log10(Dl_h)
Mi_h = mi_h + 5.0 - 5.0 * np.log10(Dl_h)
Mz_h = mz_h + 5.0 - 5.0 * np.log10(Dl_h)

# SELECT HALOS TO MIMIC redMaPPer
mh = (z_h > 0.2) & (z_h < 0.65) & (lMH > 13.77)

Dcosmo = cosmo.comoving_distance(z_h[mh])
catalog = SkyCoord(ra=halos.ra[mh] * u.degree,
                   dec=halos.dec[mh] * u.degree,
                   distance=Dcosmo)
idx, Rprox_h2, Rprox_h = catalog.match_to_catalog_3d(catalog, nthneighbor=2)
Rprox_h = np.array(Rprox_h.value)
Code example #5
    def main(self):

        ### UNPACKING THE HEALPIX FILES ###

        hpx_path = "%s/%s" % (self.options.healpix_dir,
                              self.options.healpix_file)

        # If you specify a telescope that's not in the default list, you must provide the rest of the information
        detector = None
        is_error = False
        is_custom_percentile = False
        custom_percentile = None

        # Check the inputs for errors...
        if self.options.telescope_abbreviation not in self.telescope_mapping:
            print(
                "Running for custom telescope. Checking required parameters..."
            )

            if self.options.telescope_abbreviation == "":
                is_error = True
                print(
                    "For custom telescope, `telescope_abbreviation` is required!"
                )

            if self.options.telescope_name == "":
                is_error = True
                print("For custom telescope, `telescope_name` is required!")

            if self.options.detector_width_deg <= 0.0:
                is_error = True
                print(
                    "For custom telescope, `detector_width_deg` is required, and must be > 0!"
                )

            if self.options.detector_height_deg <= 0.0:
                is_error = True
                print(
                    "For custom telescope, `detector_height_deg` is required, and must be > 0!"
                )

            if not is_error:
                detector = Detector(self.options.telescope_name,
                                    self.options.detector_width_deg,
                                    self.options.detector_height_deg)
        else:
            detector = self.telescope_mapping[
                self.options.telescope_abbreviation]

        if self.options.percentile > 0.9:
            is_error = True
            print("User-defined percentile must be <= 0.90")
        elif self.options.percentile > 0.0:
            is_custom_percentile = True
            custom_percentile = self.options.percentile

        if is_error:
            print("Exiting...")
            return 1

        print("\n\nTelescope: `%s -- %s`, width: %s [deg]; height %s [deg]" %
              (self.options.telescope_abbreviation, detector.name,
               detector.deg_width, detector.deg_height))
        print("%s FOV area: %s" % (detector.name,
                                   (detector.deg_width * detector.deg_height)))

        if is_custom_percentile:
            print("\nTiling to %s percentile." % self.options.percentile)

        t1 = time.time()

        print("Unpacking '%s':%s..." % (self.options.gw_id, hpx_path))
        prob, distmu, distsigma, distnorm, header_gen = hp.read_map(
            hpx_path, field=(0, 1, 2, 3), h=True)
        header = dict(header_gen)

        t2 = time.time()

        print("\n********* start DEBUG ***********")
        print("`Unpacking healpix` execution time: %s" % (t2 - t1))
        print("********* end DEBUG ***********\n")

        npix = len(prob)
        print("\nNumber of pix in '%s': %s" % (hpx_path, len(prob)))

        sky_area = 4 * 180**2 / np.pi
        area_per_px = sky_area / npix
        print("Sky Area per pix in '%s': %s [sq deg]" %
              (hpx_path, area_per_px))

        sky_area_radians = 4 * np.pi
        steradian_per_pix = sky_area_radians / npix
        pixel_radius_radian = np.sqrt(steradian_per_pix / np.pi)
        print("Steradian per px for '%s': %s" % (hpx_path, steradian_per_pix))

        # nside = the lateral resolution of the HEALPix map
        nside = hp.npix2nside(npix)
        print("Resolution (nside) of '%s': %s\n" % (hpx_path, nside))

        print("Processing for %s..." % detector.name)
        num_px_per_field = (detector.deg_height *
                            detector.deg_width) / area_per_px
        print("Pix per (%s) field for '%s': %s" %
              (detector.name, hpx_path, num_px_per_field))

        percentile = self.options.percentile
        unpacked_healpix = Unpacked_Healpix(
            hpx_path,
            prob,
            distmu,
            distsigma,
            distnorm,
            header,
            nside,
            npix,
            area_per_px,
            linestyle="-",
            compute_contours=False,
            custom_percentile=custom_percentile)

        num_50 = len(unpacked_healpix.indices_of_50)
        num_90 = len(unpacked_healpix.indices_of_90)

        area_50 = num_50 * area_per_px
        area_90 = num_90 * area_per_px

        # Debug -- should match Grace DB statistics
        print("\nArea of 50th: %s" % area_50)
        print("Area of 90th: %s\n" % area_90)

        if is_custom_percentile:
            num_custom = len(unpacked_healpix.pixels_custom)
            area_custom = num_custom * area_per_px
            print("\n[User defined] Area of %s: %s\n" %
                  (custom_percentile, area_custom))

        ### GENERATE THE ALL-SKY TILING FOR A DETECTOR & GET THE ENCLOSED GALAXIES ###

        print("Generating all sky coords for %s" % detector.name)
        detector_all_sky_coords = Cartographer.generate_all_sky_coords(
            detector)
        base_cartography = Cartographer(self.options.gw_id,
                                        unpacked_healpix,
                                        detector,
                                        detector_all_sky_coords,
                                        generate_tiles=True)

        # Save cartography
        with open(
                '%s/%s_base_cartography.pkl' %
            (self.options.working_dir, self.options.gw_id), 'wb') as handle:
            pickle.dump(base_cartography,
                        handle,
                        protocol=pickle.HIGHEST_PROTOCOL)

        if not self.options.skip_completeness:
            # Build the spatial query
            t1 = time.time()

            # Join all tiles together into a composite polygon to simplify the SQL query
            net_polygon = []
            for t in base_cartography.tiles:
                net_polygon += t.query_polygon
            joined_poly = unary_union(net_polygon)

            # Fix any seams
            eps = 0.00001
            merged_poly = joined_poly.buffer(
                eps, 1, join_style=JOIN_STYLE.mitre).buffer(
                    -eps, 1, join_style=JOIN_STYLE.mitre)

            try:
                print("Number of sub-polygons in query: %s" % len(merged_poly))
            except TypeError as e:
                print("Warning...")
                print(e)
                print(
                    "\nOnly one polygon in query! Wrapping `merged_poly` and resuming..."
                )

                merged_poly = [merged_poly]

            # Create and save sql_poly
            sql_poly = SQL_Polygon(merged_poly, detector)
            with open(
                    '%s/%s_sql_poly.pkl' %
                (self.options.working_dir, self.options.gw_id),
                    'wb') as handle:
                pickle.dump(sql_poly, handle, protocol=pickle.HIGHEST_PROTOCOL)

            # # Use the sql_poly string to create the WHERE clause
            mp_where = "ST_WITHIN(Coord, ST_GEOMFROMTEXT('"
            mp_where += sql_poly.query_polygon_string
            mp_where += "', 4326));"

            t2 = time.time()

            print("\n********* start DEBUG ***********")
            print("`Generating multipolygon` execution time: %s" % (t2 - t1))
            print("********* end DEBUG ***********\n")

            # Database I/O
            t1 = time.time()

            query = "SELECT * from GalaxyDistance2 WHERE z_dist IS NOT NULL AND z_dist_err IS NOT NULL AND B IS NOT NULL AND "
            query += mp_where

            print(query)
            print("\n*****************************\n")

            # Save the query
            with open(
                    '%s/%s_query.pkl' %
                (self.options.working_dir, self.options.gw_id),
                    'wb') as handle:
                pickle.dump(query, handle, protocol=pickle.HIGHEST_PROTOCOL)

            result = QueryDB(query, port=LOCAL_PORT)

            t2 = time.time()

            print("\n********* start DEBUG ***********")
            print("`Query database` execution time: %s" % (t2 - t1))
            print("********* end DEBUG ***********\n")

            # Instantiate galaxies
            contained_galaxies = []

            # What is the angular radius for our given map?
            cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
            pixel_diameter_arcsec = np.degrees(
                pixel_radius_radian) * 2.0 * 3600.0
            proper_radius = 20.0  # kpc

            z = np.linspace(1e-18, 0.3, 1000)
            arcsec_of_proper_diam = cosmo.arcsec_per_kpc_proper(
                z) * proper_radius * 2.0
            z_index = find_nearest(arcsec_of_proper_diam,
                                   pixel_diameter_arcsec)
            target_z = z[z_index]
            dist_cutoff = cosmo.luminosity_distance(target_z).value  # Mpc

            print("Redshift cutoff: %s" % target_z)
            print("Distance cutoff: %s" % dist_cutoff)

            for row in result:
                g = glade_galaxy(row, base_cartography.unpacked_healpix, cosmo,
                                 dist_cutoff, proper_radius)
                contained_galaxies.append(g)

            print("Query returned %s galaxies" % len(contained_galaxies))

            avg_dist = average_distance_prior(
                base_cartography.unpacked_healpix)
            catalog_completeness = GLADE_completeness(avg_dist)
            print("Completeness: %s" % catalog_completeness)

            ### REDISTRIBUTE THE PROBABILITY IN THE MAP TO THE GALAXIES ###

            print("Assigning relative prob...")
            Cartographer.assign_galaxy_relative_prob(
                base_cartography.unpacked_healpix, contained_galaxies,
                base_cartography.cumlative_prob_in_tiles, catalog_completeness)

            # Save contained galaxies
            with open(
                    '%s/%s_contained_galaxies.pkl' %
                (self.options.working_dir, self.options.gw_id),
                    'wb') as handle:
                pickle.dump(contained_galaxies,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

            print("Redistribute prob...")
            # redistributed_map
            redistributed_prob, enclosed_indices = Cartographer.redistribute_probability(
                base_cartography.unpacked_healpix, contained_galaxies,
                base_cartography.tiles, catalog_completeness)

            # Copy base cartography (have to do this?) and update the prob of the 90th percentile
            redistributed_cartography = copy.deepcopy(base_cartography)
            redistributed_cartography.unpacked_healpix.prob = redistributed_prob
            redistributed_cartography.unpacked_healpix.pixels_90 = [
                Pixel_Element(
                    ei, redistributed_cartography.unpacked_healpix.nside,
                    redistributed_cartography.unpacked_healpix.prob[ei])
                for ei in enclosed_indices
            ]

            # Update the original tiles with the new probability
            redistributed_cartography.assign_tiles(base_cartography.tiles)
            # Save cartography
            with open(
                    '%s/%s_redistributed_cartography.pkl' %
                (self.options.working_dir, self.options.gw_id),
                    'wb') as handle:
                pickle.dump(redistributed_cartography,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

        else:
            print("\n\nPure tiling...")

        ### SAVE THE RESULTS ###
        def GetSexagesimalString(c):
            ra = c.ra.hms
            dec = c.dec.dms

            ra_string = "%02d:%02d:%05.2f" % (ra[0], ra[1], ra[2])
            if dec[0] >= 0:
                dec_string = "+%02d:%02d:%05.2f" % (dec[0], np.abs(
                    dec[1]), np.abs(dec[2]))
            else:
                dec_string = "%03d:%02d:%05.2f" % (dec[0], np.abs(
                    dec[1]), np.abs(dec[2]))

            # Python has a -0.0 object. If the deg is this (because object lies < 60 min south), the string formatter will drop the negative sign
            if c.dec < 0.0 and dec[0] == 0.0:
                dec_string = "-00:%02d:%05.2f" % (np.abs(dec[1]), np.abs(
                    dec[2]))
            return (ra_string, dec_string)

        tiles_to_serialize = None
        if not self.options.skip_completeness:
            tiles_to_serialize = redistributed_cartography.tiles
        else:
            tiles_to_serialize = base_cartography.tiles

        t1 = time.time()
        sorted_tiles = sorted(tiles_to_serialize,
                              key=lambda x: x.net_prob,
                              reverse=True)

        with open(
                '%s/%s_%s_%s_Tiles.txt' %
            (self.options.working_dir, base_cartography.gwid,
             self.options.schedule_designation, detector.name),
                'w') as csvfile:

            csvwriter = csv.writer(csvfile)

            cols = []
            cols.append('# FieldName')
            cols.append('FieldRA')
            cols.append('FieldDec')
            cols.append('Telescope')
            cols.append('Filter')
            cols.append('ExpTime')
            cols.append('Priority')
            cols.append('Status')
            csvwriter.writerow(cols)

            for i, st in enumerate(sorted_tiles):

                c = coord.SkyCoord(st.ra_deg, st.dec_deg, unit=(u.deg, u.deg))
                coord_str = GetSexagesimalString(c)

                cols = []
                field_name = "%s%s%sE%s" % (
                    self.options.telescope_abbreviation,
                    str(self.options.event_number).zfill(3),
                    self.options.schedule_designation, str(i + 1).zfill(5))

                cols.append(field_name)
                cols.append(coord_str[0])
                cols.append(coord_str[1])
                cols.append(detector.name)
                cols.append(self.options.filter)
                cols.append(self.options.exp_time)
                cols.append(st.net_prob)
                cols.append('False')
                csvwriter.writerow(cols)

            print("Done")

        t2 = time.time()

        print("\n********* start DEBUG ***********")
        print("`Serialize tiles` execution time: %s" % (t2 - t1))
        print("********* end DEBUG ***********\n")
Code example #6

ax[1].hist(clusters.LAMBDA, 50, histtype='step', color='C5')
ax[1].set_xlabel(r'$\lambda$', fontsize=14)
ax[1].axvline(27.982, c='C4')
ax[1].set_ylabel(r'$N$')
plt.subplots_adjust(hspace=0.25)
ax[1].axis([20, 150, 0., 400])
# f.subplots_adjust(hspace=0,wspace=0)
plt.savefig(folder + 'hist.eps', format='eps', bbox_inches='tight')
#######################################

D_ang = np.array(cosmo.angular_diameter_distance(zc))
kpcscale = D_ang * (((1.0 / 3600.0) * np.pi) / 180.0) * 1000.0
KPCSCALE = np.repeat(kpcscale, c)

zc = np.repeat(zc, c)
D_lum = np.array(cosmo.luminosity_distance(zc)) * 1.e6
MAG_abs = members.MODEL_MAG_R + 5.0 - 5.0 * np.log10(D_lum)
Lum_r = 10.**(-0.4 * MAG_abs)

c_gr = members.MODEL_MAG_G - members.MODEL_MAG_R
c_ri = members.MODEL_MAG_R - members.MODEL_MAG_I

mcen = R_cen == 0.

RA0 = np.repeat(RA[mcen], c)
DEC0 = np.repeat(DEC[mcen], c)

t = np.repeat(angles['theta'], c)
twl = np.repeat(angles['theta_wlum'], c)
twd = np.repeat(angles['theta_wd'], c)
tp = np.repeat(angles['theta_pcut'], c)
Code example #7
File: tools_and_constants.py Project: dczheng/CRE
Omega0 = 0.302
OmegaLambda = 0.698
OmegaBaryon = 0.04751
HubbleParam = 0.68

gadget_length_in_cm = Kpc / HubbleParam
gadget_mass_in_g = 1e10 * Msun / HubbleParam
gadget_velocity_in_cm_per_s = 1e5
gadget_time_in_s = gadget_length_in_cm / gadget_velocity_in_cm_per_s
gadget_energy_in_erg = gadget_mass_in_g * gadget_length_in_cm**2 / (
    gadget_time_in_s**2)

cosmology = LambdaCDM(H0=HubbleParam * 100, Om0=Omega0, Ode0=OmegaLambda)

D_c = lambda z: cosmology.comoving_distance(z).cgs.value
D_a = lambda z: cosmology.angular_diameter_distance(z).cgs.value
D_l = lambda z: cosmology.luminosity_distance(z).cgs.value

rho_crit = lambda z: cosmology.critical_density(z).cgs.value
rho_crit0 = cosmology.critical_density0.cgs.value

rho_bar_crit = lambda z: rho_crit(z) * OmegaBaryon
rho_bar_crit0 = rho_crit0 * OmegaBaryon

n_p_crit = lambda z: rho_bar_crit(z) / m_p * Xh
n_p_crit0 = rho_bar_crit0 / m_p * Xh

n_e_crit = lambda z: n_p_crit(z) * elec_frac
n_e_crit0 = n_p_crit0 * elec_frac
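A quick sanity check of these CGS helpers, with approximate values for the parameters above:

print(D_c(0.5))       # comoving distance at z = 0.5, roughly 6e27 cm
print(rho_crit0)      # roughly 8.7e-30 g cm^-3 for H0 = 68 km/s/Mpc
print(rho_bar_crit0)  # the baryonic share, OmegaBaryon * rho_crit0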
Code example #8
ascii.write(data, 'data.txt', overwrite=True)
ascii.write(search, 'data2.txt', overwrite=True)
file = ascii.read('stars.csv')
X = LambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3, Ode0=0.7)
# iterate in reverse so removing a row does not shift the indices still to be visited
for i1 in reversed(range(len(data2))):
    if data2['name'][i1] not in file['NAME']:
        data.remove_row(i1)

Lum = []
MAGN = []
absMag = []
for i in data["REDSHIFT"]:
    Lum.append(X.luminosity_distance(i))
for i in Lum:
    MAGN.append((5 * np.log10(i / (10 * u.pc))))
for mag, dist_mod in zip(file['modelMag_i'], MAGN):
    absMag.append(mag - dist_mod.value)
#%%
RA = data['RA']
DECL = data['DEC']


def projection(RA,
               DECL,
               proj='aitoff',
               org=0,
               facecolor='white',
               mcolor='b',
Code example #9
ascii.write(data, 'data.txt', overwrite=True)
ascii.write(search, 'data2.txt', overwrite=True)

sdss = ascii.read('star_data.csv')

# iterate in reverse so removing a row does not shift the indices still to be visited
for i in reversed(range(len(data2))):
    if data2['name'][i] not in sdss['NAME']:
        data.remove_row(i)

cosmo = LambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3, Ode0=0.7)

luminosity_distances = []
for r in data["REDSHIFT"]:
    luminosity_distances.append(cosmo.luminosity_distance(r))

DM = []
for d in luminosity_distances:
    DM.append((5 * np.log10(d / (10 * u.pc))))

absMag = []
for mag, dist_mod in zip(sdss['modelMag_i'], DM):
    absMag.append(mag - dist_mod.value)
#%%
ra = data['RA']
dec = data['DEC']


def projection(ra,
               dec,
Code example #10
File: Downhill_HW2_2.py Project: rock211/notebook_wr
def Chi(i, j):
    # chi-squared of the SN distance moduli for a (Om0=i, Ode0=j) cosmology
    cosmo = LambdaCDM(H0=70, Om0=i, Ode0=j)
    d_L = cosmo.luminosity_distance(z).value  # Mpc
    DM_m = 5 * np.log10(d_L) + 25  # model distance modulus
    chisq = ((DM - DM_m) / DM_err)**2
    return np.sum(chisq)
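A minimal grid-scan driver for Chi, assuming z, DM, and DM_err are loaded as in the related examples; the grid bounds and step counts here are illustrative:

import numpy as np

om_grid = np.linspace(0.1, 0.5, 41)
ol_grid = np.linspace(0.5, 0.9, 41)
chi2 = np.array([[Chi(om, ol) for ol in ol_grid] for om in om_grid])
i_min, j_min = np.unravel_index(np.argmin(chi2), chi2.shape)
print(om_grid[i_min], ol_grid[j_min])  # best-fitting (Om0, Ode0) on the grid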
Code example #11
def lense(catalogue,
          lens_z,
          searchlensed,
          file_dfx,
          file_dfy,
          LL,
          pixsize=0.25,
          SLregime=2,
          main_source_z=None,
          DefField_Norm=None):
    '''
    This is the lensing code developed by Adi and Alfredo and ported over to Python. Its purpose is to apply
    lensing to a simulation of point sources given some deflection fields.

    Inputs:
    catalogue (npy filename) - the catalogue of point sources to be lensed; locations in arcseconds, centered at (0,0).
        The format should be a rec array with keys RA (arcsec), DEC (arcsec), Flux (Jy), Redshift.
    lens_z (float) - the redshift of the cluster.
    searchlensed (float) - parameter to determine if lensed sources can be extended.
    file_dfx (txt filename) - the deflection field in x.
    file_dfy (txt filename) - the deflection field in y.
    LL (float) - the side length of the map in arcminutes (assumes a square).
    pixsize (float) - the pixel size (arcseconds) of the deflection field.
    SLregime (float) - the side length of the strong-lensing regime in arcminutes (assumed to be at the center of the map).
    main_source_z (float) - redshift used to create the deflection fields.
    DefField_Norm (float) - normalization used if main_source_z is not given (it can be calculated from main_source_z when provided).
        If neither main_source_z nor DefField_Norm is provided, DefField_Norm is assumed to be 1.
    '''

    # Define input cluster redshift, source redshift to which deflection field is scaled to, and the pixel
    # scale (both are input provided by Adi Zitrin). Often the deflection field will be scaled to DLS/DS=1.

    #if SLregime is bigger than the mapsize make the SLregime the mapsize
    if SLregime > LL:
        SLregime = LL

    print('SLregime :', SLregime)
    SL_pix = SLregime * 60 / pixsize  #converting strong lensing regime from arcmin to pixels
    #for the weakly lensed region we don't estimate the whole image plane but limit to a WLmask x WLmask area around the lensed source
    WLmask = 2.0  # reference scale to estimate WL region in arcmin
    WLmask = WLmask / pixsize * 60.0  #WLmask now in pixels

    print('WL region (in pixels):', WLmask)

    #estimate the angular distance to the lensing cluster (z = lens_z)
    cos_model = LCDM(hb, OMm, OMl)
    DL1 = cos_model.luminosity_distance(lens_z).value / (
        1 + lens_z)**2  # * 100 to match units in Adi's code.

    print('DL1 (luminosity distance) :', DL1)

    #If this is scaled to a certain source redshift (main_sourcez) then calculate angular diameter distances
    if main_source_z is not None:
        #estimate angular diameter distance to main_source_z
        DS1 = cos_model.luminosity_distance(main_source_z).value / (
            1 + main_source_z)**2
        #estimate distance between the lensing cluster and main_source_z
        DLS1 = (DS1 * (1. + main_source_z) - DL1 * (1 + lens_z)) / (
            1. + main_source_z)  # WARNING: only true for Omega_k = 0

        #estimate the deflection field normalization
        DLS1_DS1 = DLS1 / DS1

    elif DefField_Norm is not None:  # if main_source_z not provided but the deflection field norm was given.
        DLS1_DS1 = DefField_Norm

    else:
        DLS1_DS1 = 1.  #if main_source_z was not provided the deflection field normalization should (very likely) be 1

    #read in the deflection fields provided by Adi Zitrin, (npixel X npixel)
    wl_alpha_x = np.loadtxt(file_dfx)  #deflection field of x
    wl_alpha_y = np.loadtxt(file_dfy)  #deflection field of y

    # make sure the deflection fields are NxN
    if wl_alpha_x.shape[0] > wl_alpha_x.shape[1]:  # more rows than columns: trim rows
        wl_alpha_x = wl_alpha_x[0:wl_alpha_x.shape[1], :]
        wl_alpha_y = wl_alpha_y[0:wl_alpha_x.shape[1], :]

    elif wl_alpha_x.shape[1] > wl_alpha_x.shape[0]:  # more columns than rows: trim columns
        wl_alpha_x = wl_alpha_x[:, 0:wl_alpha_x.shape[0]]
        wl_alpha_y = wl_alpha_y[:, 0:wl_alpha_x.shape[0]]

    #weak lensing estimates
    wl_lengthx = len(wl_alpha_x[:, 0])
    wl_lengthy = len(wl_alpha_y[0, :])
    # Print check
    print('This should be 1 ...', wl_lengthx / wl_lengthy)

    wl_x = np.arange(start=1, stop=wl_lengthx + 1)
    wl_y = np.arange(start=1, stop=wl_lengthy + 1)

    WLY, WLX = np.meshgrid(wl_x, wl_y)

    #weak lensing gradients
    wl_dax_dy = np.zeros((wl_lengthx, wl_lengthy))
    wl_dax_dx = np.zeros((wl_lengthx, wl_lengthy))
    wl_day_dy = np.zeros((wl_lengthx, wl_lengthy))
    wl_day_dx = np.zeros((wl_lengthx, wl_lengthy))

    if wl_lengthx == wl_lengthy:
        for i in range(wl_lengthx):
            wl_dax_dy[i, :] = deriv(wl_alpha_x[i, :])
            wl_dax_dx[:, i] = deriv(wl_alpha_x[:, i])
            wl_day_dy[i, :] = deriv(wl_alpha_y[i, :])
            wl_day_dx[:, i] = deriv(wl_alpha_y[:, i])

    else:
        for i in range(wl_lengthx):
            wl_dax_dx[:, i] = deriv(wl_alpha_x[:, i])
            wl_day_dx[:, i] = deriv(wl_alpha_y[:, i])

        for i in range(wl_lengthy):
            wl_dax_dy[i, :] = deriv(wl_alpha_x[i, :])
            wl_day_dy[i, :] = deriv(wl_alpha_y[i, :])

    #Strong Lensing
    #make a copy of the grids but only over the SL region

    dim_minx = int((wl_lengthx - SL_pix) / 2)
    dim_plusx = int(
        (wl_lengthx + SL_pix) / 2) + 1  #numpy end interval is exclusive.
    dim_miny = int((wl_lengthy - SL_pix) / 2)
    dim_plusy = int((wl_lengthy + SL_pix) / 2) + 1

    sl_alpha_x = wl_alpha_x[dim_minx:dim_plusx, dim_minx:dim_plusx]
    sl_alpha_y = wl_alpha_y[dim_miny:dim_plusy, dim_miny:dim_plusy]

    sl_dax_dy = wl_dax_dy[dim_miny:dim_plusy, dim_miny:dim_plusy]
    sl_dax_dx = wl_dax_dx[dim_minx:dim_plusx, dim_minx:dim_plusx]
    sl_day_dy = wl_day_dy[dim_miny:dim_plusy, dim_miny:dim_plusy]
    sl_day_dx = wl_day_dx[dim_minx:dim_plusx, dim_minx:dim_plusx]
    sl_lengthx = len(sl_alpha_x[:, 0])
    sl_lengthy = len(sl_alpha_y[0, :])

    sl_x = np.arange(start=1, stop=int(sl_lengthx + 1))
    sl_y = np.arange(start=1, stop=int(sl_lengthy + 1))

    SLY, SLX = np.meshgrid(sl_x, sl_y)
    # shift between WL and SL frames (you will need this number to estimate the correct positions of the weak and strong lensed sources)
    pos_shift = (wl_lengthx - sl_lengthx) / 2.

    print('pos shift between sl and wl [pixels]:', pos_shift)

    cat = np.load(catalogue, allow_pickle=True)
    print('Reading Catalogue to be lensed')
    dfxx = cat.item().get('RA')
    dfyy = cat.item().get('DEC')
    dfflux = cat.item().get('Flux')
    dfredz = cat.item().get('Redshift')

    # Convert coordinates from arcseconds to pixels in the larger WL frame
    xx = (dfxx + LL * 30) / pixsize
    yy = (dfyy + LL * 30) / pixsize

    # Number of sources in catalogue
    nsources = len(xx)
    print('number of sources to be lensed:', nsources)

    print('Setup complete, beginning to lens sources')

    start = time.time()

    for i in range(nsources):
        if not i % 1000:
            print('on source %s / %s' % (str(i), str(nsources)))
    #     print('lensing source %s' % str(i + 1))
    # begin dealing with foreground sources (we don't need to lens these) ---------------------
        if lens_z > dfredz[i]:
            #         print('source %s is a foreground source' % str(i + 1))
            mindX = xx[i]
            mindY = yy[i]
            mmu = 1.0

        else:
            # Finish dealing with foreground sources --------------------------------------------------
            # Begin dealing with background sources (we do need to lens these)
            # Estimate angular diameter distance to source (z = dfredz)
            DS2 = cos_model.luminosity_distance(dfredz[i]).value / (
                1.0 + dfredz[i])**2  # * 100 to match units in Adi's code
            # Estimate distance between the lensing cluster and source
            DLS2 = (DS2 * (1. + dfredz[i]) - DL1 *
                    (1. + lens_z)) / (1 + dfredz[i])
            #this is the factor to scale the deflection field to different source redshifts.
            scaling_factor = (DLS2 / DS2) / DLS1_DS1

            #begin SL regime analysis
            x_check = abs(dfxx[i]) < SLregime * 30
            y_check = abs(dfyy[i]) < SLregime * 30
            xy_check = x_check and y_check
            if xy_check:
                #             print('beggining strong lensing regime')
                #estimate source position in SL frame
                source_x = int((dfxx[i] + SLregime * 30) / pixsize)
                source_y = int((dfyy[i] + SLregime * 30) / pixsize)
                #estimate the magnifications (from lensing eq.), adding sl_day_dx and sl_day_dy element by element
                sl_dax_day_dxdy = np.multiply(sl_dax_dx, sl_day_dy)
                sl_day_dax_dydx = np.multiply(sl_dax_dy, sl_day_dx)

                poisson = np.add(sl_dax_dx, sl_day_dy) * scaling_factor
                magnification = abs(
                    1 / (1 - poisson +
                         (sl_dax_day_dxdy - sl_day_dax_dydx) * scaling_factor))
                #will need to double check that these matrix operations work on sl_day_dx and sl_dax_dx
                #find the pixels where lensed image (or images) end up
                x_side = SLX - sl_alpha_x * scaling_factor - source_x
                y_side = SLY - sl_alpha_y * scaling_factor - source_y
                source_dist = np.sqrt(x_side**2 + y_side**2)
                indX, indY = np.where(
                    source_dist <
                    searchlensed)  #this may have to change for a 2D array
                #if a lensed source was found within searched lensed do this

                if len(indX) > 1:

                    #cut a square sub-map including all pixels with "source_dist < searchlensed" for easy computation
                    min_x = min(indX)
                    min_y = min(indY)
                    max_x = max(indX)
                    max_y = max(indY)
                    temp_multi_x = max_x - min_x
                    temp_multi_y = max_y - min_y

                    #pick the larger of the two sides
                    if temp_multi_x - temp_multi_y >= 0:
                        multi_ll = temp_multi_x
                    else:
                        multi_ll = temp_multi_y

                    if (min_x + multi_ll) < len(source_dist[:, 0]):
                        if (min_y + multi_ll) < len(source_dist[0, :]):
                            regmap = source_dist[min_x:min_x + multi_ll + 1,
                                                 min_y:min_y + multi_ll + 1]
                        else:
                            regmap = source_dist[min_x:min_x + multi_ll + 1,
                                                 min_y:source_dist.shape[0] +
                                                 1]
                    elif (min_y + multi_ll) < len(source_dist[0, :]):
                        regmap = source_dist[min_x:source_dist.shape[0] + 1,
                                             min_y:min_y + multi_ll + 1]
                    else:
                        regmap = source_dist[min_x:source_dist.shape[0] + 1,
                                             min_y:source_dist.shape[0] + 1]

                    indX2, indY2 = np.where(regmap < searchlensed)

                    reg_centroids = np.zeros((2, len(indX)))
                    for j in range(len(indX2)):
                        regmask = np.zeros((len(regmap[:,
                                                       0]), len(regmap[0, :])))
                        region = find_regions(regmap, indX2[j], indY2[j],
                                              searchlensed, 0)
                        regmask[region] = 1.0
                        reg_centroids[:, j] = centroid(regmap * regmask)

                    #remove duplicates.
                    reg_centroidsx, indexes = np.unique(reg_centroids[0],
                                                        return_index=True)
                    reg_centroidsy = reg_centroids[1, indexes]

                    n_centroids = 0

                    mindX = np.zeros((len(reg_centroidsx)))
                    mindY = np.zeros((len(reg_centroidsx)))
                    mmu = np.zeros((len(reg_centroidsx)))

                    for j in range(len(reg_centroidsx)):
                        ic = np.where(
                            np.logical_and(
                                reg_centroids[0] == reg_centroidsx[j],
                                reg_centroids[1] == reg_centroidsy[j]))[0]

                        if ic.size > 0 and n_centroids == 0:
                            n_centroids += 1
                            #                         print('found multiples')

                            tempX = np.mean(indX2[ic] + min_x)
                            tempY = np.mean(indY2[ic] + min_y)
                            mmu[0] = np.mean(magnification[indX2[ic] + min_x,
                                                           indY2[ic] + min_y])

                            mindX[0] = tempX + pos_shift
                            mindY[0] = tempY + pos_shift

                        elif ic.size > 0 and n_centroids > 0:
                            #                         tempX = mindX
                            #                         tempY = mindY
                            #                         tempmu = mmu
                            #                         n_centroids += 1

                            #                         mindX = np.zeros((n_centroids+1))
                            #                         mindY = np.zeros((n_centroids+1))
                            #                         mmu = np.zeros((n_centroids+1))

                            #                         mindX[0:n_centroids] = tempX
                            #                         mindY[0:n_centroids] = tempY
                            #                         mmu[0:n_centroids] = tempmu

                            mindX[j] = np.mean(indX2[ic] + min_x) + pos_shift
                            mindY[j] = np.mean(indY2[ic] + min_y) + pos_shift
                            m = np.mean(magnification[indX2[ic] + min_x,
                                                      indY2[ic] + min_y])

                            mmu[j] = np.mean(m)

                elif len(indX) == 1:
                    mindX = np.mean(indX)
                    mindY = np.mean(indY)
                    mmu = np.mean(magnification[int(mindX), int(mindY)])
                    mindX = mindX + pos_shift
                    mindY = mindY + pos_shift
                else:

                    #                 print('None found in SL trying WL')
                    source_x = int(xx[i])
                    source_y = int(yy[i])

                    #these next 4 if statements are to define the position of the WLmask to search for source image
                    #the image will tend to appear outwards the cluster center
                    #the mask is therefore not centered on the source position, but shifted depending on the quadrant
                    if dfxx[i] <= 0 and dfyy[i] <= 0:
                        xi = int(source_x - WLmask)
                        xf = int(source_x + WLmask / 2.)
                        yi = int(source_y - WLmask)
                        yf = int(source_y + WLmask / 2.)
                    elif dfxx[i] < 0 and dfyy[i] > 0:
                        xi = int(source_x - WLmask / 2.)
                        xf = int(source_x + WLmask)
                        yi = int(source_y - WLmask / 2.)
                        yf = int(source_y + WLmask)
                    elif dfxx[i] > 0 and dfyy[i] > 0:
                        xi = int(source_x - WLmask / 2.)
                        xf = int(source_x + WLmask)
                        yi = int(source_y - WLmask / 2.)
                        yf = int(source_y + WLmask)
                    elif dfxx[i] > 0 and dfyy[i] < 0:
                        xi = int(source_x - WLmask / 2.)
                        xf = int(source_x + WLmask)
                        yi = int(source_y - WLmask)
                        yf = int(source_y + WLmask / 2.)
                    # make sure the WLmask isn't outside the map
                    if xi <= 0:
                        xi = 0
                    if yi <= 0:
                        yi = 0
                    if xf >= len(wl_dax_dx[:, 0]) - 1:
                        xf = len(wl_dax_dx[:, 0]) - 1
                    if yf >= len(wl_dax_dx[0, :]) - 1:
                        yf = len(wl_dax_dx[0, :]) - 1

                    # estimate magnifications (all array operations below are element-wise)
                    poisson = np.add(wl_dax_dx[xi:xf, yi:yf],
                                     wl_day_dy[xi:xf, yi:yf]) * scaling_factor
                    wl_daxday_dxdy = np.multiply(wl_dax_dx[xi:xf, yi:yf],
                                                 wl_day_dy[xi:xf, yi:yf])
                    wl_daxday_dydx = np.multiply(wl_dax_dy[xi:xf, yi:yf],
                                                 wl_day_dx[xi:xf, yi:yf])
                    magnification = abs(1 / (1 - poisson + np.subtract(
                        wl_daxday_dxdy, wl_daxday_dydx) * scaling_factor))
                    #find the pixels where the lensed images end up
                    source_dist = np.sqrt(
                        (WLX[xi:xf, yi:yf] - wl_alpha_x[xi:xf, yi:yf] *
                         scaling_factor - source_x)**2. +
                        (WLY[xi:xf, yi:yf] - wl_alpha_y[xi:xf, yi:yf] *
                         scaling_factor - source_y)**2.)
                    indX, indY = np.where(source_dist < searchlensed)

                    if len(indX) > 0:
                        mmu = np.mean(magnification[indX, indY])
                        indX += xi
                        indY += yi

                        mindX = np.mean(indX)
                        mindY = np.mean(indY)
                    else:
                        indX = -999999.0
                        indY = -999999.0
                        mu = 0.0
                        mindX = np.mean(indX)
                        mindY = np.mean(indY)
                        mmu = np.mean(mu)

            else:
                #             print('WL regime')
                source_x = int(xx[i])
                source_y = int(yy[i])

                if dfxx[i] <= 0 and dfyy[i] <= 0:
                    xi = int(source_x - WLmask)
                    xf = int(source_x + WLmask / 2.)
                    yi = int(source_y - WLmask)
                    yf = int(source_y + WLmask / 2.)
                elif dfxx[i] < 0 and dfyy[i] > 0:
                    xi = int(source_x - WLmask)
                    xf = int(source_x + WLmask / 2.)
                    yi = int(source_y - WLmask / 2.)
                    yf = int(source_y + WLmask)
                elif dfxx[i] > 0.0 and dfyy[i] > 0.0:
                    xi = int(source_x - WLmask / 2.)
                    xf = int(source_x + WLmask)
                    yi = int(source_y - WLmask / 2.)
                    yf = int(source_y + WLmask)
                elif dfxx[i] > 0.0 and dfyy[i] < 0.0:
                    xi = int(source_x - WLmask / 2.)
                    xf = int(source_x + WLmask)
                    yi = int(source_y - WLmask)
                    yf = int(source_y + WLmask / 2.)
                if xi < 0:
                    xi = 0
                if yi < 0:
                    yi = 0
                if xf >= len(wl_dax_dx[:, 0]) - 1:
                    xf = len(wl_dax_dx[:, 0]) - 1
                if yf >= len(wl_dax_dx[0, :]) - 1:
                    yf = len(wl_dax_dx[0, :]) - 1

                xf += 1
                yf += 1
                poisson = (wl_dax_dx[xi:xf, yi:yf] +
                           wl_day_dy[xi:xf, yi:yf]) * scaling_factor
                wl_daxday_dxdy = np.multiply(wl_dax_dx[xi:xf, yi:yf],
                                             wl_day_dy[xi:xf, yi:yf])
                wl_daxday_dydx = np.multiply(wl_dax_dy[xi:xf, yi:yf],
                                             wl_day_dx[xi:xf, yi:yf])

                magnification = abs(
                    1 / (1 - poisson +
                         (wl_daxday_dxdy - wl_daxday_dydx) * scaling_factor))
                source_dist = np.sqrt(
                    (WLX[xi:xf, yi:yf] - wl_alpha_x[xi:xf, yi:yf] *
                     scaling_factor - source_x)**2. +
                    (WLY[xi:xf, yi:yf] -
                     wl_alpha_y[xi:xf, yi:yf] * scaling_factor - source_y)**2.)

                indX, indY = np.where(source_dist < searchlensed)
                if len(indX) > 0:
                    mmu = np.mean(magnification[indX, indY])
                    indX += xi
                    indY += yi

                    mindX = np.mean(indX)
                    mindY = np.mean(indY)
                else:
                    #if we are here it means there is no image (probably outside of the map)
                    #nonsense position + mag of zero
                    mindX = -999999.0
                    mindY = -999999.0
                    mmu = 0

            #--- done with WL regime
            #---- finished lensing analysis

        if i == 0:
            x_out = []
            y_out = []
            mu_out = []
            f_out = []

        if not hasattr(mindX, '__len__'):  # scalar: a single image position
            x_out.append(mindX)
            y_out.append(mindY)
            mu_out.append(mmu)
            f_out.append(dfflux[i])
        else:  # array: multiple images of the same source
            for j in range(len(mindX)):
                x_out.append(mindX[j])
                y_out.append(mindY[j])
                mu_out.append(mmu[j])
                f_out.append(dfflux[i])

    x_out = np.asarray(x_out) * pixsize  #convert back to arcseconds
    y_out = np.asarray(y_out) * pixsize  #convert back to arcseconds
    f_out = np.asarray(f_out)
    mu_out = np.asarray(mu_out)

    f_final = np.multiply(f_out, mu_out)

    end = time.time()

    print('Finished lensing sources, total time: %s minutes' % str(
        (end - start) / 60.))
    return x_out, y_out, f_final
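The magnification computed in both regimes above is the inverse determinant of the lensing Jacobian A = I - s * d(alpha)/d(theta), where s is the redshift scaling factor. A compact element-wise sketch of the same expression, with illustrative names:

import numpy as np

def magnification(dax_dx, dax_dy, day_dx, day_dy, s):
    # det A = 1 - s*(dax_dx + day_dy) + s**2 * (dax_dx*day_dy - dax_dy*day_dx)
    det = 1 - s * (dax_dx + day_dy) + s**2 * (dax_dx * day_dy - dax_dy * day_dx)
    return np.abs(1.0 / det)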
Code example #12
#plt.xscale( 'log' )
plt.yscale('log')
plt.legend()
#plt.show()
plt.savefig('1.png')
plt.close()

alpha = 3.1
v_c = 1.4e9
n0 = 1e-9
g0 = 1
L = 1000 * 1000 * pc
v = np.linspace(1, 5000, 100)  #MHz

cos = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
dis = cos.luminosity_distance(0.2).value * 1e6 * pc
print(dis)
for b in B:
    bb = 10**b
    u_b = bb * bb / (8 * np.pi)
    E_tot = 2 / 3 * sig_t * u_b * n0 / v_c * np.power(
        v / v_c, -(alpha - 1) / 2) * np.power(L, 3)
    #print( E_tot.min() )
    flux = E_tot / (4 * np.pi * dis * dis) / Jansky * 1000
    #print( 4 * np.pi * dis * dis )
    #plt.plot( v, E_tot, label=r'$B=e^{%i}\mu g$'%(b) )
    plt.plot(v, flux, label=r'$B=10^{%i}\,\mu G$' % (b))
plt.ylabel(r'$(mJy)$')
plt.xlabel('MHz')
plt.yscale('log')
#plt.xscale( 'log' )
Code example #13
def match(cat1, cat2, path, write, properties=None):
    if not os.path.isfile(cat1):
        cat1 = cat1.replace(cat1.split('_')[-1],
                            'z0.20.fits')  # use as dummy to get columns
        HI = False

    else:
        HI = True

    cat_name_HI = cat1
    cat_name_cont = cat2

    cat_fits_HI = fits.open(cat_name_HI)
    cat_fits_cont = fits.open(cat_name_cont)

    cat_HI = cat_fits_HI[1].data
    cat_cont = cat_fits_cont[1].data

    cols_HI = cat_fits_HI[1].columns.names
    cols_cont = cat_fits_cont[1].columns.names

    cat_HI_table = Table.read(cat_name_HI, format='fits')
    cat_cont_table = Table.read(cat_name_cont)

    for i in range(len(cols_cont)):
        if cols_cont[i] in cols_HI:
            cols_cont[i] = cols_cont[i] + '_1'

    # how to convert a recarray or fits table to np array:
    cat_HI_np = np.array(cat_HI_table).view(np.float32).reshape(
        (np.array(cat_HI_table).shape + (-1, )))
    cat_cont_np = np.array(cat_cont_table).view(np.float32).reshape(
        (np.array(cat_cont_table).shape + (-1, )))

    print(cols_cont, cols_HI)

    if HI:
        print('cat lengths', cat1.split('/')[-1], len(cat_HI), len(cat_cont))

        MHI_HI = cat_HI['MHI']
        MH_HI = cat_HI['Mh']
        #r_HI_HI = cat_HI['HI size']
        line_flux_HI = cat_HI['HI flux'] / 1000  # from mJy to Jy
        incl_HI = cat_HI['inclination']
        z_HI = cat_HI['redshift']
        OptClass_HI = cat_HI['OptClass']

        MHI_cont = cat_cont['MHI_pred']
        MH_cont = cat_cont['Mh_1']
        #r_HI_cont = cat_cont['HI size']
        incl_cont = cat_cont['inclination_1']
        z_cont = cat_cont['redshift_1']
        mass_function = 0

        if mass_function:
            cont_optclasses = [
                MHI_cont[cat_cont['RadioClass'] == 1],
                MHI_cont[cat_cont['RadioClass'] > 3]
            ]
            labels = ['late-type', 'late-type + AGN']
            colors = ['red', 'pink']

            plt.clf()

            norm = False
            #plt.hist(MHI_HI,range = (7.5, 12), bins = 100,log=True, histtype='step', fill=False,label = 'MHI', alpha = 1, normed=norm)
            #plt.hist(cont_optclasses,range = (7.5, 12),stacked = True, histtype='step', fill=False,bins = 100, log=True, alpha = 1, normed=norm, color = colors, label = labels)

            plt.legend()
            plt.xlabel(r'log MHI (M$_{\odot}$)')
            plt.ylabel('N sources')
            plt.title(cat2.split('_')[-1].split('.fits')[0])
            plt.savefig('cross/plots/HI_number_counts%s.png' %
                        cat2.split('_')[-1].split('.fits')[0])
            return

        # work out line_flux_pred from MHI_pred and don't match any continuum sources with line_flux_pred below the line flux cut

        H = 67.0
        M = 0.32
        L = 0.68
        c = 2.99792458e8
        G = 6.67408e-11
        cosmo = LambdaCDM(H0=H, Om0=M, Ode0=L)
        D_L_cont = cosmo.luminosity_distance(z_cont).value  # Mpc

        line_flux_cont = 10**MHI_cont / (49.8 * D_L_cont**2)

        print(MHI_cont)
        print(len(cat_cont), 'continuum sources')
        print(len(cat_cont[line_flux_cont >= line_flux_cut]),
              'continuum sources predict HI flux above HI cut')
        print(len(cat_cont[line_flux_cont < line_flux_cut]),
              'continuum sources will not be matched with HI')
        print(len(cat_HI), 'HI sources')
        print(
            len(cat_HI) - len(cat_cont[line_flux_cont >= line_flux_cut]),
            'HI sources will not be matched with continuum')
        print(
            len(cat_cont) + len(cat_HI) -
            len(cat_cont[line_flux_cont >= line_flux_cut]),
            'unique sources in catalogue')

        unmatched_cont = cat_cont_np[line_flux_cont < line_flux_cut]

        unmatched_cont_empty = np.zeros(
            (unmatched_cont.shape[0], cat_HI_np.shape[1])) - 100

        unmatched_cont_stack = np.hstack(
            (unmatched_cont_empty, unmatched_cont))

        matched_cat_cont_np = cat_cont_np[line_flux_cont >= line_flux_cut]

        # find lowest N MHI sources in HI cat, where N is the number of surplus HI sources after matching
        # with all continuum sources with predicted flux over HI flux threshold

        N_unmatched_HI = len(cat_HI) - len(
            cat_cont[line_flux_cont >= line_flux_cut])
        print('N_unmatched_HI', N_unmatched_HI)
        # catch values less than zero
        N_unmatched_HI = np.max((N_unmatched_HI, 0))
        print('N_unmatched_HI', N_unmatched_HI)

        print(line_flux_cont.shape)
        print(line_flux_HI.shape)
        # value of MHI_HI of Nth lowest source after sorting in order of MHI_HI
        sorted_line_flux_HI = np.sort(line_flux_HI)
        HI_cat_line_flux_cut = sorted_line_flux_HI[N_unmatched_HI]
        print('all HI sources with line flux below', HI_cat_line_flux_cut,
              'Jy will not be matched')

        unmatched_HI = cat_HI_np[line_flux_HI < HI_cat_line_flux_cut]

        unmatched_HI_empty = np.zeros(
            (unmatched_HI.shape[0], cat_cont_np.shape[1])) - 100

        unmatched_HI_stack = np.hstack((unmatched_HI, unmatched_HI_empty))

        matched_cat_HI_np = cat_HI_np[line_flux_HI >= HI_cat_line_flux_cut]

        all_cols = cols_HI + cols_cont

        unmatched_HI_table = Table()
        for i, col in enumerate(all_cols):
            unmatched_HI_table[col] = unmatched_HI_stack[:, i]

        unmatched_cont_table = Table()
        for i, col in enumerate(all_cols):
            unmatched_cont_table[col] = unmatched_cont_stack[:, i]

        matched_MHI_HI = MHI_HI[line_flux_HI >= HI_cat_line_flux_cut]
        matched_MHI_cont = MHI_cont[line_flux_cont >= line_flux_cut]
        print(matched_MHI_HI.shape, matched_MHI_cont.shape)

        sorted_matched_MHI_HI = matched_MHI_HI[np.argsort(matched_MHI_HI)]

        sorted_matched_MHI_cont = matched_MHI_cont[np.argsort(
            matched_MHI_cont)]
        for i in range(len(sorted_matched_MHI_HI)):
            print(sorted_matched_MHI_HI[i], sorted_matched_MHI_cont[i])

        print(sorted_matched_MHI_cont.shape)
        print(sorted_matched_MHI_HI.shape)
        both = np.vstack((sorted_matched_MHI_HI, sorted_matched_MHI_cont))
        print(both)

        matched_cat_HI_np = matched_cat_HI_np[np.argsort(matched_MHI_HI)]
        newcat2 = matched_cat_cont_np[np.argsort(matched_MHI_cont)]

        # now only need to match the matched catalogues, and reserve the unmatched to stack at the end

        matching_cat_HI_table = Table()
        for i, col in enumerate(cols_HI):
            matching_cat_HI_table[col] = matched_cat_HI_np[:, i]

    elif not HI:
        # make a numpy array for now

        newcat1 = np.zeros((cat_cont_np.shape[0], cat_HI_np.shape[1])) - 100
        matching_cat_HI_table = Table()
        for i, col in enumerate(cols_HI):
            matching_cat_HI_table[col] = newcat1[:, i]

        newcat2 = cat_cont_np

    # might need to make HI table from np array here as it is reordered

    # make it into a fits table
    cat = Table()
    for i, col in enumerate(cols_cont):
        cat[col] = newcat2[:, i]

    t_new = hstack([matching_cat_HI_table, cat])

    plt.clf()

    plt.scatter(t_new[t_new['OptClass_1'] == 2]['MHI'],
                t_new[t_new['OptClass_1'] == 2]['MHI_pred'],
                label='spirals')
    plt.scatter(t_new[t_new['OptClass_1'] == 1]['MHI'],
                t_new[t_new['OptClass_1'] == 1]['MHI_pred'],
                label='ellipticals')
    plt.xlabel(r'log MHI (M$_{\odot}$)')
    plt.ylabel(r'log MHI_pred (M$_{\odot}$)')
    plt.legend()
    plt.savefig(path + 'plots/MHI_pred_vs_MHI%s.png' %
                cat2.split('_')[-1].split('.fits')[0])

    plt.clf()

    plt.scatter(t_new[t_new['OptClass_1'] == 2]['MHI'],
                t_new[t_new['OptClass_1'] == 2]['MHI'] -
                t_new[t_new['OptClass_1'] == 2]['MHI_pred'],
                label='spirals')
    plt.scatter(t_new[t_new['OptClass_1'] == 1]['MHI'],
                t_new[t_new['OptClass_1'] == 1]['MHI'] -
                t_new[t_new['OptClass_1'] == 1]['MHI_pred'],
                label='ellipticals')
    plt.xlabel(r'log MHI (M$_{\odot}$)')
    plt.ylabel(r'log MHI - log MHI_pred (M$_{\odot}$)')
    plt.legend()
    plt.savefig(path + 'plots/MHI_pred_vs_MHI_res%s.png' %
                cat2.split('_')[-1].split('.fits')[0])

    if HI:

        # vstack the unmatched here
        t_new_all = vstack([t_new, unmatched_HI_table, unmatched_cont_table])
    else:
        t_new_all = t_new

    plot_MHI_dist = 0
    if plot_MHI_dist:
        plt.clf()
        plt.hist(t_new[t_new['OptClass_1'] == 2]['MHI_pred'],
                 alpha=0.5,
                 label='spirals')
        plt.hist(t_new[t_new['OptClass_1'] == 1]['MHI_pred'],
                 alpha=0.5,
                 label='ellipticals')
        plt.xlabel(r'log MHI_pred (M$_{\odot}$)')
        plt.ylabel('N')
        plt.legend()
        plt.savefig('MHI_pred%s.png' % cat2.split('_')[-1].split('.fits')[0])
        plt.clf()

        plt.hist(t_new[(t_new['RadioClass'] == 1)]['MHI_pred'],
                 alpha=0.5,
                 label='SFG-late')
        plt.hist(t_new[(t_new['RadioClass'] == 2)]['MHI_pred'],
                 alpha=0.5,
                 label='SFG-early')
        plt.hist(t_new[(t_new['RadioClass'] > 3)]['MHI_pred'],
                 alpha=0.5,
                 label='AGN')
        plt.xlabel(r'log MHI_pred (M$_{\odot}$)')
        plt.ylabel('N')
        plt.legend()
        plt.savefig('MHI_pred_radioclass%s.png' %
                    cat2.split('_')[-1].split('.fits')[0])
        plt.clf()

    outf = (path + cat2.split('/')[-1].replace("continuum", "X"))
    print('writing to.. ', outf)
    if write:
        t_new_all.write(outf, format='fits', overwrite=True)
Code example #14
File: manga.py Project: cjhang/ENLR
class MaNGA(object):
    """Class for a MaNGA observated galaxy"""
    def __init__(self, *arg, mangaid=None):
        """use plateifu and mangaid as the keyword to define a galaxy
        
            *arg: Only the first one is used, as the plateifu
            mangaid: the mangaid of the galaxy

            return MaNGA
        """

        ## Define the global data directories
        self.COSMO = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
        self.ESP = 1e-8
        self.C = 299792  # speed of light (km/s)
        self.drp_version = DRP_VERSION
        self.dap_version = DAP_VERSION
        ## Pre-read the DRP data

        if arg:
            self.plateifu = arg[0]
            self.drp = DRP[DRP['plateifu'] == self.plateifu]
            self.mangaid = self.drp['mangaid'][0]
        elif mangaid:
            self.mangaid = mangaid
            self.drp = DRP[DRP['mangaid'] == self.mangaid]
            self.plateifu = self.drp['plateifu'][0]
        else:
            print("No valid plateifu or mangaid detected!")
            raise ValueError
        self.plate, self.ifudsgn = re.split('-', self.plateifu)
        self.logcube_file = "{0}/{1}/stack/manga-{1}-{2}-LOGCUBE.fits.gz".format(
            DRP_DIR, self.plate, self.ifudsgn)
        if DAP_VERSION < '2.3.0':  # before MPL8
            self.mapsfile = DAP_DIR+self.plate + '/'+self.ifudsgn+'/' \
                            + 'manga-' + self.plate + '-' + self.ifudsgn \
                            + '-MAPS-{}-GAU-MILESHC.fits.gz'.format(PRODOCTS)
            self.datacubefile = DAP_DIR + '/'+self.plate+'/' \
                                + self.ifudsgn+'/'+'manga-' + self.plate \
                                + '-' + self.ifudsgn \
                                + '-LOGCUBE-{}-GAU-MILESHC.fits.gz'.format(PRODOCTS)
        elif DAP_VERSION >= '2.3.0':  #After MPL8
            self.mapsfile = '{0}/{1}/{2}/manga-{1}-{2}-MAPS-{3}-MILESHC-MILESHC.fits.gz'.format(
                DAP_DIR, self.plate, self.ifudsgn, PRODOCTS)
            self.datacubefile = '{0}/{1}/{2}/manga-{1}-{2}-LOGCUBE-{3}-MILESHC-MILESHC.fits.gz'.format(
                DAP_DIR, self.plate, self.ifudsgn, PRODOCTS)

        if IMAGE_DIR is None:
            self.image_file = "{}/{}/images/{}.png".format(
                DRP_DIR, self.plate, self.ifudsgn)
        else:
            self.image_file = "{}/{}-{}.png".format(IMAGE_DIR, self.plate,
                                                    self.ifudsgn)
        self.ra = self.drp['objra'][0]
        self.dec = self.drp['objdec'][0]
        self.ifura = self.drp['ifura'][0]
        self.ifudec = self.drp['ifudec'][0]
        self.elpetro_r = self.drp['nsa_elpetro_th50_r'][0]
        self.elpetro_ba = self.drp['nsa_elpetro_ba'][0]
        self.elpetro_phi = self.drp['nsa_elpetro_phi'][0]
        self.sersic_r = self.drp['nsa_sersic_th50'][0]
        self.sersic_ba = self.drp['nsa_sersic_ba'][0]
        self.sersic_phi = self.drp['nsa_sersic_phi'][0]
        self.sersic_n = self.drp['nsa_sersic_n'][0]
        self.psf = self.drp['rfwhm'][0]
        self.z = self.drp['nsa_z'][0]
        self.c = const.c.to(u.km / u.s).value
        self.d = self.COSMO.luminosity_distance(self.z)
        self.arcsec2kpc = self.COSMO.kpc_comoving_per_arcmin(self.z).to(
            u.kpc / u.arcsec)
        self.repeat, self.alter = self.find_repeat()

    @property
    def target_range(self):
        # check the target type of primary and secondary
        # 'full fill the field of view' and 'other' are currently still handled
        # by giving a reasonable value; use with caution
        mngtarg1 = self.drp['mngtarg1']
        if np.bitwise_and(mngtarg1, 2**10):
            Rrange = 1.5  # 1.5Re Primary
        elif np.bitwise_and(mngtarg1, 2**11):
            Rrange = 2.5  # 2.5Re Secondary
        elif np.bitwise_and(mngtarg1, 2**13):
            Rrange = 1.5  # full fill the field of view
        else:
            Rrange = 1.5  # Other
        return Rrange

    def find_repeat(self, recommend=True):
        # read the manga data/repeat_observation.txt to check and return all the
        # repeat observations
        # return the plateifu list, also return the recommend one if recommend
        # is True
        mangaid = self.mangaid
        txt = np.loadtxt(package_path + '/data/repeat_observations.txt',
                         skiprows=5,
                         dtype='str')
        target = txt[txt[:, 0] == mangaid]
        repeat_list = []
        if len(target) > 0:
            for i in range(1, 9, 2):
                plate = target[0][i]
                ifu = target[0][i + 1]
                if plate != '-1':
                    repeat_list.append(plate + '-' + ifu)
            if recommend:
                # pick all the ifu out and find the maximum
                ifu_list = list(
                    map(lambda s: re.match(r"\d+-(\d+)\d", s).group(1),
                        repeat_list))
                recommend_plateifu = np.array(repeat_list)[np.array(ifu_list)
                                                           == max(ifu_list)]
                # when several alternatives exist, the first one is returned
                return repeat_list, recommend_plateifu[0]
        return repeat_list, None

    def image(self, ax=None, showImage=True):
        """return or show rgb-image
        """
        try:
            imagedata = mpimg.imread(self.image_file)
        except OSError:
            print("{} image file doesn't exist!".format(self.plateifu))
            imagedata = np.zeros((2, 2))

        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        ax.imshow(imagedata)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        # set the 'show=False' when use it as a figure modifier
        if showImage:
            plt.show()

    def showdetail(self, showDRPInfo=True, showDAPInfo=True):
        """ show dap and drp detail of the target
        drp3qual see: https://trac.sdss.org/wiki/MANGA/TRM/TRM_MPL-5/metadata
        """
        if showDRPInfo:
            drp_qual_dict = dict({
                0: 'GOOD',
                1: 'RETIRED',
                2: 'BADDEPTH',
                4: 'SKYSUBBAD',
                8: 'HIGHSCAT',
                16: 'BADASTROM',
                32: 'VARIABLELSF',
                64: 'BADOMEGA',
                256: 'BADFLUX',
                512: 'BADPSF',
                2**30: 'CRITICAL'
            })
            drp3qual = self.drp['drp3qual'][0]
            print("****** DRP Imformation ******** ")
            print('The drp3qual is: {0}'.format(drp_qual_dict[drp3qual]))
            print('The image information:')
            print('elpetro_r = {0}, b/a = {1}, phi = {2}'.format(
                self.elpetro_r, self.elpetro_ba, self.elpetro_phi))
            print('The redshift is: {0}'.format(self.z))
            print('The distance is: {0}'.format(self.d))
            print('Re is: {0}'.format(self.arcsec2kpc * self.elpetro_r *
                                      u.arcsec))
            # print('LOG10(LOIII) is: {0}'.format(logO3))
        if showDAPInfo:
            dap_qual_dict = dict({
                0: 'GOOD',
                1: 'FORESTAR',
                2: 'BADZ',
                4: 'LINELESS',
                8: 'PPXFFAIL',
                16: 'SINGLEBIN',
                2**28: 'DRPCRIT',
                2**29: 'DAPCRIT',
                2**30: 'CRITICAL'
            })
            print("****** DAP Imformation ******** ")
            print('The DAP quality is: {0}'.format(
                dap_qual_dict[self.maps[0].header['DAPQUAL']]))
Code example #15
def lum_dist(z):
    obj = LambdaCDM(H0=69.6, Om0=0.286, Ode0=1 - 0.286)
    dist = obj.luminosity_distance(z).value * 3.08567758e24  # Mpc -> cm
    return dist
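An equivalent version that lets astropy's unit machinery replace the hard-coded cm-per-Mpc constant (a sketch under the same cosmology):

from astropy import units as u
from astropy.cosmology import LambdaCDM

def lum_dist_cm(z):
    cosmo = LambdaCDM(H0=69.6, Om0=0.286, Ode0=1 - 0.286)
    return cosmo.luminosity_distance(z).to(u.cm).value  # same number, no magic constant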
Code example #16
#print sn

tt = np.genfromtxt('chi_100_re.txt')

z = sn[:, 1]
DM = sn[:, 2]
DM_err = sn[:, 3]

ho = 70
zz = np.arange(z.min(), 1.5, 0.001)
print(zz)
DMm = []
cosmo = LambdaCDM(H0=ho, Om0=0.278, Ode0=0.723)  # build the cosmology once, outside the loop
for k in range(len(zz)):
    d_L = cosmo.luminosity_distance(zz[k]).value  # Mpc
    DM_m = 5 * np.log10(d_L) + 25
    DMm.append(DM_m)
plt.plot(zz, DMm, c='r', linewidth=2)
plt.scatter(z, DM, facecolor='white', edgecolors='b', linewidths=1)
plt.errorbar(z, DM, yerr=DM_err, color='b', alpha=0.5, capsize=2, fmt='None')
plt.text(1.2, 42, r'$\Omega_m$=0.278', size=14)
plt.text(1.2, 41, r'$\Omega_{\Lambda}$=0.723', size=14)
plt.xlabel(r'redshift [z]')
plt.ylabel(r'Distance Modulus')
plt.title(r'SN data')
plt.show()

size = 100
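Since astropy cosmology methods accept array redshifts, the model curve above can also be computed without the explicit loop; a vectorized sketch using the same parameter values:

cosmo = LambdaCDM(H0=ho, Om0=0.278, Ode0=0.723)
DMm = 5 * np.log10(cosmo.luminosity_distance(zz).value) + 25  # one call over the whole zz array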