def text_summary(self):
        """ """
        text = ""
        for name, res in sorted(self.cache.items()):
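            # Alerts carrying the "isdiffpos" key are detections; history
            # entries without it are non-detections (upper limits).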
            detections = [
                x for x in res["prv_candidates"] + [res["candidate"]]
                if "isdiffpos" in x.keys()
            ]
            detection_jds = [x["jd"] for x in detections]
            first_detection = detections[detection_jds.index(
                min(detection_jds))]
            latest = detections[-1]
            try:
                last_upper_limit = [
                    x for x in res["prv_candidates"]
                    if "isdiffpos" not in x and x["jd"] < first_detection["jd"]
                ][-1]

                text += self.candidate_text(
                    name,
                    first_detection["jd"],
                    last_upper_limit["diffmaglim"],
                    last_upper_limit["jd"],
                )

            # No pre-detection upper limit
            except IndexError:
                text += self.candidate_text(name, first_detection["jd"], None,
                                            None)

            ned_z, ned_dist = query_ned_for_z(
                ra_deg=latest["ra"],
                dec_deg=latest["dec"],
                searchradius_arcsec=1,
                logger=self.logger,
            )

            if ned_z:
                ned_z = float(ned_z)
                absmag = self.calculate_abs_mag(latest["magpsf"], ned_z)
                if ned_z > 0:
                    z_dist = Distance(z=ned_z, cosmology=cosmo).value
                    text += f"It has a spec-z of {ned_z:.3f} [{z_dist:.0f} Mpc] and an abs. mag of {absmag:.1f}. Distance to SDSS galaxy is {ned_dist:.2f} arcsec. "
                    if self.dist:
                        gw_dist_interval = [
                            self.dist - self.dist_unc,
                            self.dist + self.dist_unc,
                        ]

            c = SkyCoord(res["candidate"]["ra"],
                         res["candidate"]["dec"],
                         unit="deg")
            g_lat = c.galactic.b.degree
            if abs(g_lat) < 15.0:
                text += f"It is located at a galactic latitude of {g_lat:.2f} degrees. "

            xmatch_info = get_cross_match_info(raw=res, logger=self.logger)
            text += xmatch_info
            text += "\n"

        return text
Example No. 2
def correct_rgc(coord,
                glx_ctr=ICRS('00h42m44.33s', '+41d16m07.5s'),
                glx_PA=Angle('37d42m54s'),
                glx_incl=Angle('77.5d'),
                glx_dist=Distance(783, unit=u.kpc)):
    # TODO: reference for the 783 kpc distance
    """Computes deprojected galactocentric distance.
    Inspired by: http://idl-moustakas.googlecode.com/svn-history/
        r560/trunk/impro/hiiregions/im_hiiregion_deproject.pro
    Parameters
    ----------
    coord : :class:`astropy.coordinates.ICRS`
        Coordinate of points to compute galactocentric distance for.
        Can be either a single coordinate, or array of coordinates.
    glx_ctr : :class:`astropy.coordinates.ICRS`
        Galaxy center.
    glx_PA : :class:`astropy.coordinates.Angle`
        Position angle of galaxy disk.
    glx_incl : :class:`astropy.coordinates.Angle`
        Inclination angle of the galaxy disk.
    glx_dist : :class:`astropy.coordinates.Distance`
        Distance to galaxy.
    Returns
    -------
    obj_dist : :class:`astropy.coordinates.Distance`
        Galactocentric distance(s) for coordinate point(s).
    """
    # distance from coord to glx centre
    sky_radius = glx_ctr.separation(coord)
    # TODO: what on earth is this avg_dec doing here?
    avg_dec = 0.5 * (glx_ctr.dec + coord.dec).radian
    x = (glx_ctr.ra - coord.ra) * numpy.cos(avg_dec)
    y = glx_ctr.dec - coord.dec
    # azimuthal angle from coord to glx  -- not completely happy with this
    phi = glx_PA - Angle('90d') \
            + Angle(numpy.arctan(y.arcsec / x.arcsec), unit=u.rad)

    # TODO: this does not even look remotely like the original
    # https://github.com/moustakas/moustakas-projects/blob/master/
    # hiiregions/im_hiiregion_deproject.pro
    # These two lines below looks like cartesian coordinates are calculated
    # from polar coordinates, where the radius sky_radius is obtained by
    # calculating the angular difference, and the angle is obtained by taking
    # the position angle into account as well as polar coordinate angle
    # phi=arctan(y/x). O_o what's going on here?

    # convert to coordinates in rotated frame, where y-axis is galaxy major
    # ax; have to convert to arcmin b/c can't do sqrt(x^2+y^2) when x and y
    # are angles
    xp = (sky_radius * numpy.cos(phi.radian)).arcmin
    yp = (sky_radius * numpy.sin(phi.radian)).arcmin

    # de-project
    ypp = yp / numpy.cos(glx_incl.radian)
    obj_radius = numpy.sqrt(xp**2 + ypp**2)  # in arcmin
    obj_dist = Distance(Angle(obj_radius, unit=u.arcmin).radian * glx_dist,
                        unit=glx_dist.unit)

    # Computing PA in disk (unused)
    obj_phi = Angle(numpy.arctan(ypp / xp), unit=u.rad)
    # TODO Zero out very small angles, i.e.
    # if numpy.abs(Angle(xp, unit=u.arcmin)) < Angle(1e-5, unit=u.rad):
    #     obj_phi = Angle(0.0)

    return obj_dist
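
# A minimal usage sketch (not from the original source): an arbitrary point
# roughly half a degree from the M31 centre, assuming the same imports the
# function relies on (ICRS, Angle, Distance, u, numpy) are in scope.
pt = ICRS(ra=Angle('00h44m00s'), dec=Angle('+41d30m00s'))
print(correct_rgc(pt))  # deprojected galactocentric distance as a Distance in kpc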
Example No. 3
import numpy as np

from astropy import units as u
from astropy.coordinates import Distance

from at2019dsg.data import bran_z


def convert_radio(flux_mjy, frequency_ghz):
    flux_jy = 10**-3 * flux_mjy
    frequency_hz = 10**9 * frequency_ghz
    return 10**-23 * flux_jy * frequency_hz


def convert_to_mjy(energy_flux, frequency_ghz):
    frequency_hz = 10**9 * frequency_ghz
    return 10**3 * 10**23 * energy_flux / frequency_hz
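
# Quick sanity check of the two converters (arbitrary values, not from the
# original source): 1 mJy at 10 GHz corresponds to nu*F_nu = 1e-16 erg/s/cm^2,
# and the round trip recovers the input.
assert np.isclose(convert_to_mjy(convert_radio(1.0, 10.0), 10.0), 1.0)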


dl = Distance(z=bran_z).to("cm").value
area = 4 * np.pi * (dl**2)

flux_conversion = (1 + bran_z) / area

colors = {
    "r.IOO": "r",
    "r.ZTF": "r",
    "r.SEDm": "r",
    "g.ZTF": "g",
    "g.IOO": "g",
    "UVW2": "violet",
    "UVM2": "purple",
    "UVW1": "darkblue",
    "U": "lightblue",
}
Example No. 4
def dL_to_z(dL):  # convert distance to redshift, assuming a cosmology

    return Distance(dL, unit=u.Mpc).compute_z(cosmology=cosmo)
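
# Context this one-liner assumes (a sketch; the imports and cosmology choice
# are not shown in the original):
#
#     from astropy import units as u
#     from astropy.coordinates import Distance
#     from astropy.cosmology import Planck15 as cosmo
#
# dL_to_z(100.0) then gives z ~ 0.023 for a source at 100 Mpc.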
Example No. 5
def load_mccon12_table(
        mcconn_url='https://www.astrosci.ca/users/alan/Nearby_Dwarfs_Database_files/NearbyGalaxies.dat',
        qtable=True):
    from astropy.utils import data
    from astropy.io import ascii

    # have to do SSL stuff because astrosci.ca has expired SSL
    import ssl
    from urllib.error import URLError

    baseline_create = ssl._create_default_https_context
    try:
        mcconn_tab_str = data.get_file_contents(mcconn_url, cache=True)
    except URLError as e:
        if 'SSL: CERTIFICATE_VERIFY_FAILED' in str(e.args):
            # retry the download with certificate verification disabled
            ssl._create_default_https_context = ssl._create_unverified_context
            mcconn_tab_str = data.get_file_contents(mcconn_url, cache=True)
        else:
            raise
    finally:
        ssl._create_default_https_context = baseline_create

    headerrow = mcconn_tab_str.split('\n')[32]
    colnames = headerrow.split()
    colidxs = [headerrow.rindex(col) for col in colnames]

    # this *removes* the references
    col_starts = colidxs[:-1]
    col_ends = [i - 1 for i in colidxs[1:]]
    colnames = colnames[:-1]

    str_tab = ascii.read(mcconn_tab_str.split('\n')[34:],
                         format='fixed_width_no_header',
                         names=colnames,
                         col_starts=col_starts,
                         col_ends=col_ends)

    mcconn_tab = (QTable if qtable else Table)()
    mcconn_tab['Name'] = [s.strip() for s in str_tab['GalaxyName']]

    scs = []
    for row in str_tab:
        dm = float(row['(m-M)o'].split()[0])
        scs.append(
            SkyCoord(row['RA'],
                     row['Dec'],
                     unit=(u.hour, u.deg),
                     distance=Distance(distmod=dm)))
    mcconn_tab['Coords'] = SkyCoord(scs)

    for col in str_tab.colnames[3:]:
        if col in ('EB-V', 'F', 'MHI'):
            #single number
            mcconn_tab[col] = [float(s) for s in str_tab[col]]
        else:
            # num + -
            vals, ps, ms = [], [], []
            for s in str_tab[col]:
                val, p, m = s.split()
                vals.append(float(val))
                ps.append(float(p))
                ms.append(float(m))
            mcconn_tab[col] = vals
            mcconn_tab[col + '+'] = ps
            mcconn_tab[col + '-'] = ms

    return mcconn_tab
Example No. 6
def _GaiaDR2Match(row,
                  fC,
                  match_radius=1,
                  gaia_mag_tolerance=0.5,
                  id_check=True):

    flags = 0

    coo = SkyCoord(row['_RAJ2000'],
                   row['_DEJ2000'],
                   frame='icrs',
                   unit=(u.hourangle, u.deg))
    s = coo.to_string('decimal', precision=5).split()
    _ = StringIO()
    with redirect_stdout(_), redirect_stderr(_):
        job = Gaia.launch_job(_query.format(s[0], s[1]))
    DR2Table = job.get_results()

    # Replace missing values for pmra, pmdec, parallax
    DR2Table['pmra'].fill_value = 0.0
    DR2Table['pmdec'].fill_value = 0.0
    DR2Table['parallax'].fill_value = 0.0
    DR2Table = Table(DR2Table.filled(), masked=True)
    # Avoid problems with small/negative parallaxes
    DR2Table['parallax'].mask = DR2Table['parallax'] <= 0.1
    DR2Table['parallax'].fill_value = 0.0999
    DR2Table = DR2Table.filled()
    # Fix units for proper motion columns
    DR2Table['pmra'].unit = 'mas / yr'
    DR2Table['pmdec'].unit = 'mas / yr'

    cat = SkyCoord(DR2Table['ra'],
                   DR2Table['dec'],
                   frame='icrs',
                   distance=Distance(parallax=DR2Table['parallax'].quantity),
                   pm_ra_cosdec=DR2Table['pmra'],
                   pm_dec=DR2Table['pmdec'],
                   obstime=Time(2015.5,
                                format='decimalyear')).apply_space_motion(
                                    new_obstime=Time('2000-01-01 00:00:00.0'))
    idx, d2d, _ = coo.match_to_catalog_sky(cat)
    if d2d > match_radius * u.arcsec:
        raise ValueError('No Gaia DR2 source within specified match radius')

    try:
        key = re.match('[AFGKM][0-9]', row['SpTy'])[0]
        GV = SpTypeToGminusV[key]
    except TypeError:
        flags += 1024
        GV = -0.15
    try:
        Gmag = float(row['Gmag'])
    except ValueError:
        raise ValueError('Invalid Gmag value ', row['Gmag'])
    except KeyError:
        Gmag = row['Vmag'] + GV

    if abs(Gmag - DR2Table['phot_g_mean_mag'][idx]) > gaia_mag_tolerance:
        if 'Gmag' in row.colnames:
            print("Input value: G = ", Gmag)
        else:
            print("Input values: V = {:5.2f}, SpTy = {} -> G_est = {:5.2f}".
                  format(row['Vmag'], row['SpTy'], Gmag))
        print("Catalogue values: G = {:5.2f}, Source = {}".format(
            DR2Table['phot_g_mean_mag'][idx], DR2Table['source_id'][idx]))
        raise ValueError('Nearest Gaia source does not match estimated G mag')

    if (str(row['Old_Gaia_DR2']) != str(DR2Table['source_id'][idx])):
        if id_check:
            raise ValueError('Nearest Gaia DR2 source does not match input ID')
        flags += 32768

    gmag = np.array(DR2Table['phot_g_mean_mag'])
    sep = coo.separation(cat)
    if any((sep <= 51 * u.arcsec) & (gmag < gmag[idx])):
        flags += 16384
    if any((sep > 51 * u.arcsec) & (sep < 180 * u.arcsec)
           & (gmag < gmag[idx])):
        flags += 8192

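    # Contamination estimate: the G-band flux of each neighbour relative to
    # the target, weighted by fC evaluated at its separation from the target.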
    gflx = np.ma.array(10**(-0.4 * (gmag - gmag[idx])),
                       mask=False,
                       fill_value=0.0)
    gflx.mask[idx] = True
    contam = np.nansum(gflx.filled() * fC(cat.separation(cat[idx]).arcsec))

    if contam > 1:
        flags += 4096
    elif contam > 0.1:
        flags += 2048

    return DR2Table[idx], contam, flags, cat[idx]
Example No. 7
disc = [
    6.3859852129837405e+44, 7.815868649313232e+42, 3.6134023859374993e+43,
    3.113371620728157e+43, 1.3362016711943853e+34, 2.0391507422076078e+44,
    np.nan, 1.5297311429456913e+50, 1.2405910280432744e+50, np.nan,
    6.997817458802234e+49, np.nan, 1.0892622349494232e+50,
    9.083837244328854e+49, -1.6681279919378288e+51, 8.478839258207428e+49,
    8.105037392489653e+49, 8.337753034670461e+49, 8.149846392365348e+49,
    8.225600142986851e+49, 7.780114890394365e+49, 7.786759130127634e+49,
    7.787456191656473e+49, 7.811685713236478e+49, 7.692888704265783e+49,
    7.592978045804951e+49
]

mask = dist_mpc < 120.

zs = [Distance(dl * u.Mpc).compute_z() for dl in dist_mpc[mask]]

print(zs)

x = np.log(zs)
y = np.log(np.array(e_per_source)[mask])

[m, c] = np.polyfit(x, y, 1)


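# The straight-line fit is done in log-log space, so f is a power law:
# f(x) = exp(c) * x**m.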
def f(x):
    return np.exp(m * np.log(x) + c)


name_root = "analyses/ztf/depth/"
save_dir = plot_output_dir(name_root)
Example No. 8
import numpy as np
import os

from astropy.coordinates import Distance
from flarestack.shared import transients_dir

# Start and end time of neutrino flare, taken from box fit in
# https://arxiv.org/abs/1807.08794.

t_start = 56937.81
t_end = 57096.21

# Ra and dec of source, from Science paper (https://arxiv.org/abs/1807.08794)
ra = 77.3582
dec = 5.69314

# Redshift of source, according to https://arxiv.org/abs/1802.01939, is 0.3365
z = 0.3365
lumdist = Distance(z=z).to("Mpc").value

# Creates the .npy source catalogue
txs_catalogue = custom_sources(name="TXS_0506+056",
                               ra=ra,
                               dec=dec,
                               weight=1.,
                               distance=lumdist,
                               start_time=t_start,
                               end_time=t_end,
                               ref_time=t_start)

txs_cat_path = transients_dir + "/TXS_0506+056.npy"
np.save(txs_cat_path, txs_catalogue)
Example No. 9
rad = np.arange(1.0, 67.0, 5.0)

rand_i = 0

for r_i, r in enumerate(rad):

    spheres = h5_arr(spheresfile, "radecz_{0}".format(str(r_i * 5 + 1)))

    badvols = np.zeros(spheres.shape[0])

    for i, sphere in enumerate(spheres):

        rang = Angle(sphere[0], u.deg)
        decang = Angle(sphere[1], u.deg)

        dis = Distance(comv(sphere[2]), u.Mpc)

        coord = SkyCoord(ra=rang, dec=decang, distance=dis)

        sph_cen = np.array([coord.cartesian.x.value,
                            coord.cartesian.y.value,
                            coord.cartesian.z.value])

        print("rad: ", r, ", sphere: ", i)

        # Get radius of circular projection of sphere
        R = np.arcsin(r / np.sqrt(np.sum(sph_cen[:]**2)))

        # Get coordinates of circle centre on unit sphere
        crc_cen = radec2xyz(sphere[:2])[0]

        # Compute tree search radius from Cosine rule
        # (include points extending beyond sphere edge to account for
Example No. 10
for n in np.arange(n, m):
    # create a list in which all of the available volumes for the target will be stored
    available_volumes = []
    try:
        # pull quantities from the list
        RA = str(quasar_list.iloc[n][0])
        DEC = str(quasar_list.iloc[n][1])
        coords = RA + "_" + DEC
        redshift_target = quasar_list.iloc[n][3]  # redshift of the target
        r_mag_target = quasar_list.iloc[n][2]  # magnitude of the target

        # calculate the absolute magnitude of the target from the red magnitude
        dL = Distance(unit=u.Mpc, z=redshift_target, cosmology=cosmo) / u.Mpc
        magnitude_target = r_mag_target - 5 * np.log10(dL) - 25

        # if you have the absolute magnitude of your target, remove the above
        # calculation and use:
        #     magnitude_target = quasar_list.iloc[n][2]
        # instead

        # print(r_magnitude_target, magnitude_target, redshift_target)

        # search for and download all data products associated with those coordinates
        obsTable = Observations.query_region(coords, radius = 0.02)
        dataProductsByObservation = Observations.get_product_list(obsTable)
        manifest = Observations.download_products(dataProductsByObservation, download_dir=coords,
                                                     obs_collection = "HST",
                                                     dataproduct_type = "image",
Example No. 11
}
# dictionary test for log parabola
lp_dict_test = {
    "type": "LogParabola",
    "parameters": {
        "p": p_test,
        "q": q_test,
        "gamma_0": gamma_0,
        "gamma_min": gamma_min_test,
        "gamma_max": gamma_max_test,
    },
}
# blob parameters
R_b_test = 1e16 * u.cm
B_test = 1 * u.G
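# redshift corresponding to a luminosity distance of 1e27 cm (~320 Mpc)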
z_test = Distance(1e27, unit=u.cm).z
delta_D_test = 10
Gamma_test = 10
pwl_blob_test = Blob(
    R_b_test,
    z_test,
    delta_D_test,
    Gamma_test,
    B_test,
    spectrum_norm_test,
    pwl_dict_test,
)
lp_blob_test = Blob(
    R_b_test,
    z_test,
    delta_D_test,
Example No. 12
    def __get_all_state_vectors(self):
        '''
        Very optimized way to get all state vectors from spice.

        The code is sort of convoluted, but it works and is several 
        times faster than any alternative without resorting to 
        using CSpice directly (don't want to deal with that).

        1. set(list(zip(...))) gets unique (body, time) pairs.

        2. These pairs are used as the keys in a dictionary storing
           the state vectors as numpy arrays. The key is also passed 
           to the __state_from_spice function to calculate the state 
           vector.

        3. Use a list comprehension to populate a numpy array with
           all state vectors by reading from the dictionary. This is faster
           than numpy indexing; each dictionary lookup is O(1).

        4. Set attributes using the usual astropy units.

        TODO: Allow for mixing between terrestrial and non-terrestrial observers 
        '''

        unique_rocks_and_times = set(list(zip(self.spiceid, self.epoch)))
        unique_dict = {
            key: self.__state_from_spice(key)
            for key in unique_rocks_and_times
        }
        x, y, z, vx, vy, vz = np.array([
            unique_dict[(body, time)]
            for body, time in zip(self.spiceid, self.epoch)
        ]).T

        #print(x, y, z, vx, vy, vz)
        #print(unique_dict)

        if hasattr(self, 'obscode'):
            unique_locations_and_times = set(
                list(
                    zip(self.lon.deg, self.lat.deg, self.elevation,
                        self.epoch)))
            unique_obs_dict = {
                key: self.__compute_topocentric_correction(key)
                for key in unique_locations_and_times
            }
            dx_icrs, dy_icrs, dz_icrs = np.array([
                unique_obs_dict[(lon, lat, elev, time)]
                for lon, lat, elev, time in zip(self.lon.deg, self.lat.deg,
                                                self.elevation, self.epoch)
            ]).T

            # Transform from icrs to ecliptic
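            # (a rotation about the x-axis by the obliquity; epsilon is
            # assumed to be defined elsewhere in the module)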
            dx = dx_icrs
            dy = dy_icrs * np.cos(epsilon) + dz_icrs * np.sin(epsilon)
            dz = -dy_icrs * np.sin(epsilon) + dz_icrs * np.cos(epsilon)

            x += (dx * u.m).to(u.km).value
            y += (dy * u.m).to(u.km).value
            z += (dz * u.m).to(u.km).value

        self.x = Distance(x, u.km, allow_negative=True).to(u.au)
        self.y = Distance(y, u.km, allow_negative=True).to(u.au)
        self.z = Distance(z, u.km, allow_negative=True).to(u.au)

        self.vx = (vx * u.km / u.s).to(u.au / u.day)
        self.vy = (vy * u.km / u.s).to(u.au / u.day)
        self.vz = (vz * u.km / u.s).to(u.au / u.day)
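
        # The deduplication pattern from steps 1-3 of the docstring, sketched
        # in isolation with a hypothetical expensive_state() stand-in for
        # __state_from_spice:
        #
        #     unique_keys = set(zip(bodies, epochs))
        #     cache = {key: expensive_state(key) for key in unique_keys}
        #     states = np.array([cache[(b, t)] for b, t in zip(bodies, epochs)])
        #
        # Each state is computed once per unique (body, epoch) pair, then
        # fanned back out to the full array by O(1) dictionary lookups.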
Example No. 13
    def distance(self):
        return Distance(785. * u.kpc)
Example No. 14
timestart_local = Time("{0} {1}".format(DATE, start_time), format='iso')
timeend_local = Time("{0} {1}".format(DATE, end_time), format='iso')
timestart_utc = timestart_local - TIMEZONE * u.hour
timeend_utc = timeend_local - TIMEZONE * u.hour

data = pd.DataFrame()

index = 0
time = timestart_utc
while time <= timeend_utc:
    sun = get_body('sun', time, loc, ephemeris='builtin')
    moon = get_body('moon', time, loc, ephemeris='builtin')
    pa = sun.position_angle(moon).degree
    altaz_sun = sun.transform_to(AltAz(location=loc, obstime=time))
    altaz_moon = moon.transform_to(AltAz(location=loc, obstime=time))
    sun_dist = Distance(sun.distance, unit=u.cm)
    moon_dist = Distance(moon.distance, unit=u.cm)
    sun_size = 2 * sun_radius * 60 * 180 / (np.pi * sun_dist)
    moon_size = 2 * moon_radius * 60 * 180 / (np.pi * moon_dist)

    data.loc[index, 'UT'] = time
    data.loc[index, 'PA'] = round(sun.position_angle(moon).degree, 3)
    data.loc[index, 'SunALT'] = round(altaz_sun.alt.degree, 3)
    data.loc[index, 'SunAZ'] = round(altaz_sun.az.degree, 3)
    data.loc[index, 'SunSize'] = round(float(sun_size), 3)
    data.loc[index, 'MoonALT'] = round(altaz_moon.alt.degree, 3)
    data.loc[index, 'MoonAZ'] = round(altaz_moon.az.degree, 3)
    data.loc[index, 'MoonSize'] = round(float(moon_size), 3)

    index += 1
Example No. 15
def test_distance_nan():
    # Check that giving NaNs to Distance doesn't emit a warning
    Distance([0, np.nan, 1] * u.m)
Example No. 16
def calculate_transient_cosmology(e_pdf_dict,
                                  rate,
                                  name,
                                  zmax=8.,
                                  nu_bright_fraction=1.0,
                                  diffuse_fraction=None,
                                  diffuse_fit="joint_15"):

    e_pdf_dict = read_e_pdf_dict(e_pdf_dict)

    diffuse_flux, diffuse_gamma = get_diffuse_flux_at_1GeV(diffuse_fit)

    logger.info("Using the {0} best fit values of the diffuse flux.".format(
        diffuse_fit))
    # print "Raw Diffuse Flux at 1 GeV:", diffuse_flux / (4 * np.pi * u.sr)
    logger.info("Diffuse Flux at 1 GeV: {0}".format(diffuse_flux))
    logger.info("Diffuse Spectral Index is {0}".format(diffuse_gamma))

    if "gamma" not in e_pdf_dict:
        logger.warning(
            "No spectral index has been specified. "
            "Assuming source has spectral index matching diffuse flux")
        e_pdf_dict["gamma"] = diffuse_gamma

    savedir = plots_dir + "cosmology/" + name + "/"

    try:
        os.makedirs(savedir)
    except OSError:
        pass

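    # The fluence integral maps a differential flux normalisation at 1 GeV
    # onto a total energy, so nu_e [erg] and nu_flux_at_1_gev are
    # interchangeable below.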
    energy_pdf = EnergyPDF.create(e_pdf_dict)
    fluence_conversion = energy_pdf.fluence_integral() * u.GeV**2

    if "nu_flux_at_1_gev" in e_pdf_dict.keys():
        nu_flux_at_1_gev = e_pdf_dict["nu_flux_at_1_gev"].to("GeV-1")
        nu_e = (nu_flux_at_1_gev * fluence_conversion).to("erg")

    else:
        nu_e = e_pdf_dict["source_energy_erg"]
        nu_flux_at_1_gev = nu_e.to("GeV") / fluence_conversion

    gamma = e_pdf_dict["gamma"]

    logger.info(f"Neutrino Energy is {nu_e:.2g}")
    logger.info(f"Neutrino Flux at 1 GeV is {nu_flux_at_1_gev:.2g}")
    logger.info(f"Local rate is {rate(0.0):.2g}")

    zrange, step = np.linspace(0.0, zmax, int(1 + 1e3), retstep=True)

    rate_per_z, nu_flux_per_z, nu_flux_per_source, cumulative_nu_flux = \
        define_cosmology_functions(rate, nu_flux_at_1_gev, gamma, nu_bright_fraction)

    logger.info(
        f"Cumulative sources at z=8.0: {cumulative_z(rate_per_z, 8.0)[-1].value:.2g}"
    )

    nu_at_horizon = cumulative_nu_flux(8)[-1]

    logger.info(f"Cumulative flux at z=8.0 (1 GeV): {nu_at_horizon:.2g}")
    logger.info(
        f"Cumulative annual flux at z=8.0 (1 GeV): {(nu_at_horizon * u.yr).to('GeV-1 cm-2 sr-1'):.2g}"
    )

    ratio = nu_at_horizon.value / diffuse_flux.value
    logger.info(f"Fraction of diffuse flux at 1GeV: {ratio:.2g}")
    logger.info(f"Cumulative neutrino flux {nu_at_horizon:.2g}")
    logger.debug(f"Diffuse neutrino flux {diffuse_flux:.2g}")

    if diffuse_fraction is not None:
        logger.info(
            f"Scaling flux so that, at z=8, the contribution is equal to {diffuse_fraction:.2g}"
        )
        nu_flux_at_1_gev *= diffuse_fraction / ratio
        logger.info(
            f"Neutrino Energy rescaled to {(nu_flux_at_1_gev * fluence_conversion).to('erg'):.2g}"
        )

    plt.figure()
    plt.plot(zrange, rate(zrange))
    plt.yscale("log")
    plt.xlabel("Redshift")
    plt.ylabel(r"Rate [Mpc$^{-3}$ year$^{-1}$]")
    plt.tight_layout()
    plt.savefig(savedir + 'rate.pdf')
    plt.close()

    plt.figure()
    plt.plot(zrange, rate_per_z(zrange) / rate(zrange))
    plt.yscale("log")
    plt.xlabel("Redshift")
    plt.ylabel(r"Differential Comoving Volume [Mpc$^{3}$ dz]")
    plt.tight_layout()
    plt.savefig(savedir + 'comoving_volume.pdf')
    plt.close()

    logger.debug("Sanity Check:")
    logger.debug("Integrated Source Counts \n")

    for z in [0.01, 0.08, 0.1, 0.2, 0.3, 0.7, 8]:
        logger.debug("{0}, {1}, {2}".format(z,
                                            Distance(z=z).to("Mpc"),
                                            cumulative_z(rate_per_z, z)[-1]))

    for nearby in [0.1, 0.3]:

        logger.info(
            f"Fraction of flux from nearby (z<{nearby}) sources: {cumulative_nu_flux(nearby)[-1] / nu_at_horizon:.2g}"
        )

    plt.figure()
    plt.plot(zrange, rate_per_z(zrange))
    plt.yscale("log")
    plt.ylabel("Differential Source Rate [year$^{-1}$ dz]")
    plt.xlabel("Redshift")
    plt.tight_layout()
    plt.savefig(savedir + 'diff_source_count.pdf')
    plt.close()

    plt.figure()
    plt.plot(zrange[1:-1], [x.value for x in cumulative_z(rate_per_z, zrange)])
    plt.yscale("log")
    plt.ylabel("Cumulative Sources")
    plt.xlabel("Redshift")
    plt.tight_layout()
    plt.savefig(savedir + 'integrated_source_count.pdf')
    plt.close()

    plt.figure()
    plt.plot(zrange[1:-1], nu_flux_per_z(zrange[1:-1]))
    plt.yscale("log")
    plt.xlabel("Redshift")
    plt.tight_layout()
    plt.savefig(savedir + 'diff_vol_contribution.pdf')
    plt.close()

    cum_nu = [x.value for x in cumulative_nu_flux(zrange)]

    plt.figure()
    plt.plot(zrange[1:-1], cum_nu)
    plt.yscale("log")
    plt.xlabel("Redshift")
    plt.ylabel(r"Cumulative Neutrino Flux [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$ ]")
    plt.axhline(y=diffuse_flux.value, color="red", linestyle="--")
    plt.tight_layout()
    plt.savefig(savedir + 'int_nu_flux_contribution.pdf')
    plt.close()

    plt.figure()
    plt.plot(zrange[1:-1], [nu_flux_per_z(z).value for z in zrange[1:-1]])
    plt.yscale("log")
    plt.xlabel("Redshift")
    plt.ylabel(
        r"Differential Neutrino Flux [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$ dz]")
    plt.axhline(y=diffuse_flux.value, color="red", linestyle="--")
    plt.tight_layout()
    plt.savefig(savedir + 'diff_nu_flux_contribution.pdf')
    plt.close()

    plt.figure()
    plt.plot(zrange[1:-1],
             [(nu_flux_per_source(z)).value for z in zrange[1:-1]])
    plt.yscale("log")
    plt.xlabel("Redshift")
    plt.ylabel(r"Time-Integrated Flux per Source [ GeV$^{-1}$ cm$^{-2}$]")
    plt.tight_layout()
    plt.savefig(savedir + 'nu_flux_per_source_contribution.pdf')
    plt.close()

    return nu_at_horizon
Example No. 17
vv.ROW_LIMIT = -1
result = vv.query_region(stereo_to_sun, radius=4 * u.deg, catalog='I/345/gaia2')

###############################################################################
# Let's see how many stars we've found.

print(len(result[0]))

###############################################################################
# Now we load all stars into an array coordinate.  The reference epoch for the
# star positions is J2015.5, so we update these positions to the date of the
# COR2 observation using :meth:`astropy.coordinates.SkyCoord.apply_space_motion`.

tbl_crds = SkyCoord(ra=result[0]['RA_ICRS'],
                    dec=result[0]['DE_ICRS'],
                    distance=Distance(parallax=u.Quantity(result[0]['Plx'])),
                    pm_ra_cosdec=result[0]['pmRA'],
                    pm_dec=result[0]['pmDE'],
                    radial_velocity=result[0]['RV'],
                    frame='icrs',
                    obstime=Time(result[0]['Epoch'], format='jyear'))
tbl_crds = tbl_crds.apply_space_motion(new_obstime=cor2.date)

###############################################################################
# One of the bright features is actually Mars, so let's also get that coordinate.

mars = get_body_heliographic_stonyhurst('mars', cor2.date, observer=cor2.observer_coordinate)

###############################################################################
# Let's plot the results.  The coordinates will be transformed automatically
# when plotted using :meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord`.
Example No. 18
def make_catalog(ra: float, dec: float, radius: float, year: float,
                 outfile=None, return_data=False
                 ) -> Union[int, Tuple[int, Table]]:
    """
    Make a catalogue of Gaia/2MASS point sources based on a circle of center
    "ra" and "dec" of "radius" [arcsec]

    Output table has the following columns:
    index, ra, dec, and the 2MASS K magnitude (repeated in three filter columns)

    and is saved in "outfile"

    :param ra: float, the Right Ascension in degrees
    :param dec: float, the Declination in degrees
    :param radius: float, the field radius (in arc seconds)
    :param year: float, the decimal year (to propagate ra/dec with proper
                 motion/plx)

    :return: the position of the target in the source list table if return_data
             is False; otherwise a tuple of (1) the position of the target in
             the source list table and (2) the Table of sources centered on
             the ra/dec
    """
    print('='*50)
    print('Field Catalog Generator')
    print('='*50)

    # log input parameters
    print('\nMaking catalog for field centered on:')
    print('\t RA: {0}'.format(ra))
    print('\t DEC: {0}'.format(dec))
    print('\n\tradius = {0} arcsec'.format(radius))
    print('\tObservation date: {0}'.format(year))

    # get observation time
    with warnings.catch_warnings(record=True) as _:
        obs_time = Time(year, format='decimalyear')
    # get center as SkyCoord
    coord_cent = SkyCoord(ra * uu.deg, dec * uu.deg)

    # -------------------------------------------------------------------------
    # Query Gaia - need proper motion etc
    # -------------------------------------------------------------------------
    # construct gaia query
    gaia_query = GAIA_QUERY.format(RA=ra, DEC=dec, RADIUS=radius/3600.0,
                                   GAIA_TWOMASS_ID=GAIA_TWOMASS_ID)
    # define gaia time
    gaia_time = Time('2015.5', format='decimalyear')
    # run Gaia query
    print('\nQuerying Gaia field\n')
    gaia_table = tap_query(GAIA_URL, gaia_query)
    # -------------------------------------------------------------------------
    # Query 2MASS - need to get J, H and Ks mag
    # -------------------------------------------------------------------------
    jmag, hmag, kmag, tmass_id = [], [], [], []
    # now get 2mass magnitudes for each entry
    for row in range(len(gaia_table)):
        # log progress
        pargs = [row + 1, len(gaia_table)]
        print('Querying 2MASS source {0} / {1}'.format(*pargs))
        # query 2MASS for magnitudes
        tmass_query = TWOMASS_QUERY.format(ID=gaia_table[GAIA_TWOMASS_ID][row],
                                           TWOMASS_ID=TWOMASS_ID)
        # run 2MASS query
        tmass_table = tap_query(TWOMASS_URL, tmass_query)
        # deal with no entry
        if tmass_table is None:
            jmag.append(np.nan)
            hmag.append(np.nan)
            kmag.append(np.nan)
            tmass_id.append('NULL')
        else:
            jmag.append(tmass_table['jmag'][0])
            hmag.append(tmass_table['hmag'][0])
            kmag.append(tmass_table['kmag'][0])
            tmass_id.append(tmass_table[TWOMASS_ID][0])
    # add columns to table
    gaia_table['JMAG'] = jmag
    gaia_table['HMAG'] = hmag
    gaia_table['KMAG'] = kmag
    gaia_table[TWOMASS_ID] = tmass_id
    # -------------------------------------------------------------------------
    # Clean up table - remove all entries without 2MASS
    # -------------------------------------------------------------------------
    # remove rows with NaNs in 2MASS magnitudes
    mask = np.isfinite(gaia_table['JMAG'])
    mask &= np.isfinite(gaia_table['HMAG'])
    mask &= np.isfinite(gaia_table['KMAG'])
    # mask table
    cat_table = gaia_table[mask]
    # -------------------------------------------------------------------------
    # Apply space motion
    # -------------------------------------------------------------------------
    # get entries as numpy arrays (with units)
    ra_arr = np.array(cat_table['ra']) * uu.deg
    dec_arr = np.array(cat_table['dec']) * uu.deg
    pmra_arr = np.array(cat_table['pmra']) * uu.mas/uu.yr
    pmde_arr = np.array(cat_table['pmde']) * uu.mas/uu.yr
    plx_arr = np.array(cat_table['plx']) * uu.mas
    # Get sky coords instance
    coords0 = SkyCoord(ra_arr, dec_arr,
                       pm_ra_cosdec=pmra_arr, pm_dec=pmde_arr,
                       distance=Distance(parallax=plx_arr),
                       obstime=gaia_time)
    # apply space motion
    with warnings.catch_warnings(record=True) as _:
        coords1 = coords0.apply_space_motion(obs_time)
    # find our target source (closest to input)
    separation = coord_cent.separation(coords0)
    # sort rest by brightness
    order = np.argsort(cat_table['KMAG'])
    # get the source position (after ordering separation)
    # assume our source is closest to the center
    source_pos = int(np.argmin(separation[order]))
    # -------------------------------------------------------------------------
    # make final table
    # -------------------------------------------------------------------------
    # start table instance
    final_table = Table()
    # index column
    final_table['index'] = np.arange(len(coords1))
    # ra column
    final_table[RA_OUTCOL] = coords1.ra.value[order]
    # dec column
    final_table[DEC_OUTCOL] = coords1.dec.value[order]
    # mag columns
    final_table[F380M_OUTCOL] = cat_table['KMAG'][order]
    final_table[F430M_OUTCOL] = cat_table['KMAG'][order]
    final_table[F480M_OUTCOL] = cat_table['KMAG'][order]
    # -------------------------------------------------------------------------
    # deal with return data
    if return_data:
        return source_pos, final_table
    # -------------------------------------------------------------------------
    # write file
    write_catalog(final_table, outfile)
    # return the position closest to the input coordinates
    return source_pos
Example No. 19
def finalPLot(fig, gs, reg_name, AD_data_pm, AD_data, mmag_plx, mp_plx, plx,
              e_plx, plx_bay, ph_plx, pl_plx, plx_out, e_plx_out, dm_asteca,
              e_dm):
    '''
    Parallax versus magnitude, parallax KDEs, and final results.
    '''

    # x_max_cmd, x_min_cmd, y_min_cmd, y_max_cmd = diag_limits(
    #     'mag', col_plx, mmag_plx)

    # plt.style.use('seaborn-darkgrid')

    ax = plt.subplot(gs[0:2, 0:2])
    ax.set_title("{}".format(reg_name), fontsize=12, x=.13, y=.94)
    plt.xlabel('Plx [mas]', fontsize=12)
    plt.ylabel('G', fontsize=12)
    # Set minor ticks
    ax.minorticks_on()
    # ax.axvspan(-100., 0., alpha=0.25, color='grey', zorder=1)

    # Weighted average and its error.
    # Source: https://physics.stackexchange.com/a/329412/8514
    plx_w = 1. / e_plx
    # e_plx_w = np.sqrt(np.sum(np.square(e_plx * plx_w))) / np.sum(plx_w)
    plx_wa = np.average(plx, weights=plx_w)

    cm = plt.cm.get_cmap('RdYlBu_r')  # viridis
    # Plot stars selected to be used in the best fit process.
    plt.scatter(plx,
                mmag_plx,
                marker='o',
                c=mp_plx,
                s=30,
                edgecolors='black',
                cmap=cm,
                lw=0.35,
                zorder=4,
                label=None)
    ax.errorbar(plx,
                mmag_plx,
                xerr=e_plx,
                fmt='none',
                elinewidth=.35,
                ecolor='grey',
                label=None)

    # Bayesian
    # d_pc = Distance((1000. * round(plx_bay, 2)), unit='pc')
    # dl_pc = Distance((1000. * round(pl_plx, 2)), unit='pc')
    # dh_pc = Distance((1000. * round(ph_plx, 2)), unit='pc')
    d_pc = Distance(1000. * plx_bay, unit='pc')
    dl_pc = Distance(1000. * pl_plx, unit='pc')
    dh_pc = Distance(1000. * ph_plx, unit='pc')
    plt.axvline(x=1. / plx_bay,
                linestyle='--',
                color='b',
                lw=1.2,
                zorder=5,
                label=r"$Plx_{{Bayes}}$")

    plx_asteca = 1000. / 10**((dm_asteca + 5.) / 5.)
    pc50_asteca = round(1. / plx_asteca, 2)
    e_pc_asteca = round(.2 * np.log(10.) * pc50_asteca * e_dm, 2)
    pc16_asteca = pc50_asteca - e_pc_asteca
    pc84_asteca = pc50_asteca + e_pc_asteca
    pc50_asteca, pc16_asteca, pc84_asteca = 1000. * pc50_asteca,\
        1000. * pc16_asteca, 1000. * pc84_asteca
    plt.axvline(x=plx_asteca,
                linestyle=':',
                color='g',
                lw=1.5,
                zorder=5,
                label=r"$Plx_{{AsteCA}}$")

    # Weighted average
    plt.axvline(x=plx_wa,
                linestyle='--',
                color='r',
                lw=.85,
                zorder=5,
                label=r"$Plx_{{wa}}$")
    # Median
    plx_gr_zero = plx[plx > 0.]
    plt.axvline(x=np.median(plx_gr_zero),
                linestyle='--',
                color='k',
                lw=.85,
                zorder=5,
                label=r"$Plx_{{>0|med}}$")

    ax.legend(fontsize=12, loc=4)

    cbar = plt.colorbar(pad=.01, fraction=.02, aspect=50)
    cbar.ax.tick_params(labelsize=10)
    # cbar.set_label('MP', size=8)

    min_plx, max_plx = np.median(plx) - 2. * np.std(plx),\
        np.median(plx) + 2.5 * np.std(plx)
    plt.xlim(min_plx, max_plx)
    # ax.set_ylim(ax.get_ylim()[::-1])
    plt.gca().invert_yaxis()

    #
    AD_plx, cv_ka, pv_plx, x_cl_kde, y_cl_kde, x_fl_kde, y_fl_kde,\
        x_fl_kde_max, x_fl_kde_min = AD_data
    ax = plt.subplot(gs[0:2, 2:4])
    ax.minorticks_on()
    # ax.axvspan(-100., 0., alpha=0.25, color='grey', zorder=1)
    plt.xlabel('Plx [mas]', fontsize=12)
    plt.plot(x_fl_kde,
             y_fl_kde / max(y_fl_kde),
             color='k',
             lw=1.,
             ls='--',
             zorder=4,
             label="Field region")
    plt.plot(x_cl_kde,
             y_cl_kde / max(y_cl_kde),
             color='r',
             lw=1.5,
             zorder=6,
             label="Cluster region")
    plt.axvline(x=1. / plx_bay,
                linestyle='--',
                color='b',
                lw=1.2,
                zorder=5,
                label=r"$Plx_{Bayes}$")
    plt.axvline(x=plx_asteca,
                linestyle=':',
                color='g',
                lw=1.5,
                zorder=5,
                label=r"$Plx_{ASteCA}$")
    ax.legend(fontsize=12, loc=0)
    plt.xlim(x_fl_kde_max, x_fl_kde_min)
    plt.ylim(-.01, 1.05)

    # Add text box to the right of the synthetic cluster.
    ax_t = plt.subplot(gs[0:2, 4:6])
    ax_t.axis('off')  # Remove axis from frame.
    t0 = r"$d_{{Bayes}}={:.0f}_{{{:.0f}}}^{{{:.0f}}}\;[pc]$".format(
        d_pc.value, dl_pc.value, dh_pc.value)
    t1 = r"$(Plx_{{Bayes}}={:.3f},\;\mu_{{0}}={:.2f})$".format(
        1. / plx_bay, d_pc.distmod.value)
    t2 = r"$d_{{ASteCA}}={:.0f}_{{{:.0f}}}^{{{:.0f}}}\;[pc]$".format(
        pc50_asteca, pc16_asteca, pc84_asteca)
    t3 = r"$(Plx_{{ASteCA}}={:.3f},\;\mu_{{0}}={:.2f})$".format(
        plx_asteca, dm_asteca)
    t4 = r"$Plx_{{wa}} = {:.3f}$".format(plx_wa)
    t5 = r"$Plx_{{>0|med}} = {:.3f}$".format(np.median(plx_gr_zero))

    AD_ra, _, pv_ra = AD_data_pm[0]
    AD_dec, _, pv_dec = AD_data_pm[1]
    comb_p = combine_pvalues(np.array([pv_plx, pv_ra, pv_dec]))
    t6 = (r"$AD_{{Plx}}={:.3f},\;pvalue={:.3f}$").format(AD_plx, pv_plx)
    t7 = (r"$AD_{{PM(\alpha)}}={:.3f},\;pvalue={:.3f}$").format(AD_ra, pv_ra)
    t8 = (r"$AD_{{PM(\delta)}}={:.3f},\;pvalue={:.3f}$").format(AD_dec, pv_dec)
    # t9 = (r"$(cv_{{0.05}}={:.3f})$").format(cv_ka[2])
    t9 = "Combined " + r"$pvalue={:.3f}$".format(comb_p[1])

    text = t0 + '\n\n' + t1 + '\n\n' + t2 + '\n\n' + t3 + '\n\n' + t4 +\
        ' ; ' + t5 + '\n\n\n' + t6 + '\n\n' + t7 + '\n\n' + t8 + '\n\n' + t9
    ob = offsetbox.AnchoredText(text,
                                pad=1,
                                loc=6,
                                borderpad=-2,
                                prop=dict(size=13))
    ob.patch.set(alpha=0.85)
    ax_t.add_artist(ob)
Example No. 20

################################################

if not build_skydistances:
    print("Skipping Sky Distances...")
else:
    ## Build Sky Distance Objects ##
    print("\n\nBuilding Sky Distances...")

    sky_distances = []
    for i in range(len(distance_at_resolution_change)):

        start_vol = 0.0 * u.Mpc**3
        end_vol = cosmo.comoving_volume(
            (Distance(distance_at_resolution_change[i], u.Mpc)).z)
        start_distance = 0.0 * u.Mpc

        if i > 0:
            start_vol = cosmo.comoving_volume(
                (Distance(distance_at_resolution_change[i - 1], u.Mpc)).z)
            start_distance = sky_distances[-1].D2 * u.Mpc

        sky_distances += compute_sky_distance(start_vol, end_vol,
                                              steps_in_resolution_bin[i],
                                              start_distance, frac[i],
                                              nsides[i])

    if isDEBUG:
        print("\nGenerated distances:\n")
        for d in sky_distances:
Example No. 21
def plot_isocs_lewis(plot_path, pipeline, dataset):
    fig = Figure(figsize=(6.5, 5.), frameon=False)
    canvas = FigureCanvas(fig)
    gs = gridspec.GridSpec(2,
                           3,
                           left=0.08,
                           right=0.85,
                           bottom=0.08,
                           top=0.95,
                           wspace=0.15,
                           hspace=0.25,
                           width_ratios=(1, 1, 0.1),
                           height_ratios=(0.1, 1.))
    cax_ages = fig.add_subplot(gs[0, 0])
    cax_phases = fig.add_subplot(gs[1, 2])
    ax_ages = fig.add_subplot(gs[1, 0])
    ax_phases = fig.add_subplot(gs[1, 1])

    isoc_set = get_demo_age_grid(
        **dict(isoc_kind='parsec_CAF09_v1.2S', photsys_version='yang'))

    # Get extinction in F475W and F814W
    Av = mw_Av()
    rel_av = phat_rel_extinction()
    A_475 = rel_av[2] * Av
    A_814 = rel_av[3] * Av

    plane_key = 'lewis'

    # Plot the observed Hess diagram in each axes
    for ax in [ax_ages, ax_phases]:
        pipeline.plot_obs_hess(ax, dataset, plane_key, imshow=None)

    # Plot isochrones by age
    cmap = palettable.cubehelix.perceptual_rainbow_16.mpl_colormap
    scalar_map = mpl.cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=7.,
                                                                 vmax=10.1),
                                       cmap=cmap)
    scalar_map.set_array(np.array([isoc.age for isoc in isoc_set]))

    d = Distance(785 * u.kpc)
    for isoc in isoc_set:
        ax_ages.plot(isoc['F475W'] + A_475 - (isoc['F814W'] + A_814),
                     isoc['F814W'] + A_814 + d.distmod.value,
                     c=scalar_map.to_rgba(np.log10(isoc.age)))
    cax_ages = plt.colorbar(mappable=scalar_map,
                            cax=cax_ages,
                            ax=ax_ages,
                            orientation='horizontal')
    cax_ages.set_label(r"$\log(A/\mathrm{yr})$")

    # Plot phases
    phase_labels = {
        0: 'Pre-MS',
        1: 'MS',
        2: 'SGB',
        3: 'RGB',
        4: 'CHeB(1)',
        5: 'CHeB(2)',
        6: 'CHeB(3)',
        7: 'E-AGB',
        8: 'TP-AGB'
    }
    cmap = mpl.colors.ListedColormap(
        palettable.colorbrewer.qualitative.Set1_9.mpl_colors)
    scalar_map = mpl.cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=-0.5,
                                                                 vmax=8.5),
                                       cmap=cmap)
    scalar_map.set_array(np.array(range(0, 9)))

    d = Distance(785 * u.kpc)
    for isoc in isoc_set:
        phases = np.unique(isoc['stage'])
        srt = np.argsort(phases)
        phases = phases[srt]
        for p in phases:
            s = np.where(isoc['stage'] == p)[0]
            ax_phases.plot(isoc['F475W'][s] + A_475 - (isoc['F814W'][s] + A_814),
                           isoc['F814W'][s] + A_814 + d.distmod.value,
                           c=scalar_map.to_rgba(p),
                           lw=0.8)
    cb_phases = plt.colorbar(mappable=scalar_map,
                             cax=cax_phases,
                             ax=ax_phases,
                             ticks=range(0, 9),
                             orientation='vertical')
    # for tl in ax.get_xmajorticklabels():
    #     tl.set_visible(False)
    # for label in cb_phases.ax.get_xmajorticklabels():
    #     label.set_rotation('vertical')
    cb_phases.ax.set_yticklabels([phase_labels[p] for p in range(0, 9)])
    cb_phases.set_label(r"Stage")
    # cb_phases.update_ticks()

    for ax in [ax_ages, ax_phases]:
        ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=0.5))

    for tl in ax_phases.get_ymajorticklabels():
        tl.set_visible(False)
    ax_phases.set_ylabel('')

    gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)
    canvas.print_figure(plot_path + ".pdf", format="pdf")
Example No. 22
                 position=0,
                 leave=False,
                 colour='green'):

    p = {x: sample_table.loc[name][x] for x in sample_table.columns}

    filename = data_ext / 'MUSE_DR2.1' / 'Nebulae catalogue' / 'spectra' / f'{name}_VorSpectra.fits'
    with fits.open(filename) as hdul:
        spectra = Table(hdul[1].data)
        spectral_axis = np.exp(Table(hdul[2].data)['LOGLAM']) * u.Angstrom

    spectra['region_ID'] = np.arange(len(spectra))
    spectra.add_index('region_ID')

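    # Hubble-law redshift from the galaxy's distance modulus (z = H0 d / c),
    # used to predict the observed wavelength of H-alpha.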
    H0 = 67 * u.km / u.s / u.Mpc
    z = (H0 * Distance(distmod=p['(m-M)']) / c.c).decompose()
    lam_HA0 = 6562.8 * u.Angstrom
    lam_HA = (1 + z) * lam_HA0

    sub = nebulae[nebulae['gal_name'] == name][[
        'gal_name', 'region_ID', 'eq_width', 'HA6562_FLUX'
    ]]

    for row in tqdm(sub, position=1, leave=False, colour='red', desc=name):
        try:
            region_ID = row['region_ID']
            flux = spectra.loc[region_ID]['SPEC'] * u.erg / u.s / u.cm**2 / u.Angstrom
            fit = fit_emission_line(spectral_axis, flux, lam_HA)
            integrated_flux = fit.amplitude_0 * np.sqrt(np.pi) * np.exp(
                -1 / (2 * fit.stddev_0**2)) * u.erg / u.s / u.cm**2
            continuum = fit.c0_1 * u.erg / u.s / u.cm**2 / u.Angstrom
Example No. 23
def test_representations_api():
    from astropy.coordinates.representation import SphericalRepresentation, \
        UnitSphericalRepresentation, PhysicsSphericalRepresentation, \
        CartesianRepresentation
    from astropy.coordinates import Angle, Longitude, Latitude, Distance

    # <-----------------Classes for representation of coordinate data-------------->
    # These classes inherit from a common base class and internally contain Quantity
    # objects, which are arrays (although they may act as scalars, like numpy's
    # length-0  "arrays")

    # They can be initialized with a variety of ways that make intuitive sense.
    # Distance is optional.
    UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg)
    UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
    SphericalRepresentation(lon=8 * u.hourangle,
                            lat=5 * u.deg,
                            distance=10 * u.kpc)

    # In the initial implementation, the lat/lon/distance arguments to the
    # initializer must be in order. A *possible* future change will be to allow
    # smarter guessing of the order.  E.g. `Latitude` and `Longitude` objects can be
    # given in any order.
    UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
    SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg),
                            Distance(10, u.kpc))

    # Arrays of any of the inputs are fine
    UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)

    # Default is to copy arrays, but optionally, it can be a reference
    UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
                                lat=[5, 6] * u.deg,
                                copy=False)

    # strings are parsed by `Latitude` and `Longitude` constructors, so no need to
    # implement parsing in the Representation classes
    UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad'))

    # Or, you can give `Quantity`s with keywords, and they will be internally
    # converted to Angle/Distance
    c1 = SphericalRepresentation(lon=8 * u.hourangle,
                                 lat=5 * u.deg,
                                 distance=10 * u.kpc)

    # Can also give another representation object with the `reprobj` keyword.
    c2 = SphericalRepresentation.from_representation(c1)

    #  distance, lat, and lon typically will just match in shape
    SphericalRepresentation(lon=[8, 9] * u.hourangle,
                            lat=[5, 6] * u.deg,
                            distance=[10, 11] * u.kpc)
    # if the inputs are not the same, if possible they will be broadcast following
    # numpy's standard broadcasting rules.
    c2 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
                                 lat=[5, 6] * u.deg,
                                 distance=10 * u.kpc)
    assert len(c2.distance) == 2
    # when they can't be broadcast, it is a ValueError (same as Numpy)
    with raises(ValueError):
        c2 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
                                         lat=[5, 6] * u.deg)

    # It's also possible to pass in scalar quantity lists with mixed units. These
    # are converted to array quantities following the same rule as `Quantity`: all
    # elements are converted to match the first element's units.
    c2 = UnitSphericalRepresentation(
        lon=Angle([8 * u.hourangle, 135 * u.deg]),
        lat=Angle([5 * u.deg, (6 * np.pi / 180) * u.rad]))
    assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle
    npt.assert_almost_equal(c2.lon[1].value, 9)

    # The Quantity initializer itself can also be used to force the unit even if the
    # first element doesn't have the right unit
    lon = u.Quantity([120 * u.deg, 135 * u.deg], u.hourangle)
    lat = u.Quantity([(5 * np.pi / 180) * u.rad, 0.4 * u.hourangle], u.deg)
    c2 = UnitSphericalRepresentation(lon, lat)

    # regardless of how input, the `lat` and `lon` come out as angle/distance
    assert isinstance(c1.lat, Angle)
    assert isinstance(c1.lat, Latitude)  # `Latitude` is an `Angle` subclass
    assert isinstance(c1.distance, Distance)

    # but they are read-only, as representations are immutable once created
    with raises(AttributeError):
        c1.lat = Latitude(5, u.deg)
    # Note that it is still possible to modify the array in-place, but this is not
    # sanctioned by the API, as this would prevent things like caching.
    c2.lat[:] = [0] * u.deg  # possible, but NOT SUPPORTED

    # To address the fact that there are various other conventions for how spherical
    # coordinates are defined, other conventions can be included as new classes.
    # Later there may be other conventions that we implement - for now just the
    # physics convention, as it is one of the most common cases.
    c3 = PhysicsSphericalRepresentation(phi=120 * u.deg,
                                        theta=85 * u.deg,
                                        r=3 * u.kpc)

    # first dimension must be length-3 if a lone `Quantity` is passed in.
    c1 = CartesianRepresentation(np.random.randn(3, 100) * u.kpc)
    assert c1.xyz.shape[0] == 3
    assert c1.xyz.unit == u.kpc
    assert c1.x.shape[0] == 100
    assert c1.y.shape[0] == 100
    assert c1.z.shape[0] == 100
    # can also give each as separate keywords
    CartesianRepresentation(x=np.random.randn(100) * u.kpc,
                            y=np.random.randn(100) * u.kpc,
                            z=np.random.randn(100) * u.kpc)
    # if the units don't match but are all distances, they will automatically be
    # converted to match `x`
    xarr, yarr, zarr = np.random.randn(3, 100)
    c1 = CartesianRepresentation(x=xarr * u.kpc,
                                 y=yarr * u.kpc,
                                 z=zarr * u.kpc)
    c2 = CartesianRepresentation(x=xarr * u.kpc, y=yarr * u.kpc, z=zarr * u.pc)
    assert c1.xyz.unit == c2.xyz.unit == u.kpc
    assert_allclose((c1.z / 1000) - c2.z, 0 * u.kpc, atol=1e-10 * u.kpc)

    # representations convert into other representations via  `represent_as`
    srep = SphericalRepresentation(lon=90 * u.deg,
                                   lat=0 * u.deg,
                                   distance=1 * u.pc)
    crep = srep.represent_as(CartesianRepresentation)
    assert_allclose(crep.x, 0 * u.pc, atol=1e-10 * u.pc)
    assert_allclose(crep.y, 1 * u.pc, atol=1e-10 * u.pc)
    assert_allclose(crep.z, 0 * u.pc, atol=1e-10 * u.pc)
Example No. 24
    def __init__(self,
                 origin='ssb',
                 frame='eclipJ2000',
                 units=Units(),
                 *args,
                 **kwargs):

        coords = self.detect_coords(kwargs)
        #frame = frame.lower()
        origin = origin.lower()

        # input -> arrays
        for key in [*kwargs]:
            k = kwargs.get(key)
            if not isinstance(k, np.ndarray):
                if isinstance(k, list):
                    if key == 'name':
                        kwargs[key] = np.array(k, dtype=object)
                    else:
                        kwargs[key] = np.array(k)
                else:
                    kwargs[key] = np.atleast_1d(k)

        self.frame = frame

        self.origin = origin

        if self.origin == 'ssb':
            self.mu = mu_bary
        else:
            o = SpiceBody(spiceid=self.origin)
            self.mu = o.mu

        if coords == 'kep':

            if kwargs.get('a') is not None:
                self.a = Distance(kwargs.get('a'),
                                  units.distance,
                                  allow_negative=True)

            if kwargs.get('b') is not None:
                self.b = Distance(kwargs.get('b'),
                                  units.distance,
                                  allow_negative=True)

            if kwargs.get('v_inf') is not None:
                self.v_inf = (kwargs.get('v_inf') * units.speed).to(u.au /
                                                                    u.day)

            if kwargs.get('e') is not None:
                self.e = kwargs.get('e')
                if np.any(self.e < 0):
                    raise ValueError('Eccentricity must be positive')
                if hasattr(self, '_a'):
                    if np.any((self.a.au < 0) * (self.e < 1)):
                        raise ValueError(
                            'Orbital elements mismatch. a must be positive for e < 1.'
                        )
                    if np.any((self.a.au > 0) * (self.e > 1)):
                        raise ValueError(
                            'Orbital elements mismatch. a must be negative for e > 1.'
                        )

            self.inc = Angle(kwargs.get('inc'), units.angle)

            if kwargs.get('q') is not None:
                self.q = Distance(kwargs.get('q'), units.distance)
                if np.any(self.q.au < 0):
                    raise ValueError('pericenter distance must be positive')

            if kwargs.get('Q') is not None:
                self.Q = Distance(kwargs.get('Q'), units.distance)
                if np.any(self.Q.au < 0):
                    raise ValueError('apocenter distance must be positive')
                if np.any(self.Q < self.q):
                    raise ValueError(
                        'Apocenter distance must be >= pericenter distance.')

            if kwargs.get('node') is not None:
                self.node = Angle(kwargs.get('node'), units.angle)

            if kwargs.get('arg') is not None:
                self.arg = Angle(kwargs.get('arg'), units.angle)
                if np.any(self.arg.deg[self.e < 1e-8] != 0):
                    raise ValueError(
                        'Cannot have arg != 0 when e = 0; the argument of '
                        'pericenter is undefined for a circular orbit.')

            if kwargs.get('varpi') is not None:
                self.varpi = Angle(kwargs.get('varpi'), units.angle)

            if kwargs.get('t_peri') is not None:
                if units.timeformat is None:
                    self.t_peri = self.detect_timescale(
                        kwargs.get('t_peri'), units.timescale)
                else:
                    self.t_peri = Time(kwargs.get('t_peri'),
                                       format=units.timeformat,
                                       scale=units.timescale)

            if kwargs.get('M') is not None:
                self.M = Angle(kwargs.get('M'), units.angle)

            if kwargs.get('E') is not None:
                self.E = Angle(kwargs.get('E'), units.angle)

            if kwargs.get('f') is not None:
                self.f = Angle(kwargs.get('f'), units.angle)

            if kwargs.get('true_longitude') is not None:
                self.true_longitude = Angle(kwargs.get('true_longitude'),
                                            units.angle)

            if kwargs.get('mean_longitude') is not None:
                self.mean_longitude = Angle(kwargs.get('mean_longitude'),
                                            units.angle)

            if kwargs.get('epoch') is not None:
                if units.timeformat is None:
                    self.epoch = self.detect_timescale(kwargs.get('epoch'),
                                                       units.timescale)
                else:
                    self.epoch = Time(kwargs.get('epoch'),
                                      format=units.timeformat,
                                      scale=units.timescale)
            else:
                self.epoch = Time(np.zeros(len(self.inc)),
                                  format='jd',
                                  scale='utc')

            if kwargs.get('name') is not None:
                self.name = np.atleast_1d(kwargs.get('name'))
            else:
                # assign random, non-repeating hex-string names
                self.name = np.array(
                    [uuid.uuid4().hex for _ in range(len(self.inc))])

        elif coords == 'xyz':

            x = Distance(kwargs.get('x'), units.distance,
                         allow_negative=True).to(u.au)
            y = Distance(kwargs.get('y'), units.distance,
                         allow_negative=True).to(u.au)
            z = Distance(kwargs.get('z'), units.distance,
                         allow_negative=True).to(u.au)
            vx = (kwargs.get('vx') * units.speed).to(u.au / u.day)
            vy = (kwargs.get('vy') * units.speed).to(u.au / u.day)
            vz = (kwargs.get('vz') * units.speed).to(u.au / u.day)

            self.position = Vector(x, y, z)
            self.velocity = Vector(vx, vy, vz)

            if kwargs.get('epoch') is not None:

                if units.timeformat is None:
                    self.epoch = self.detect_timescale(kwargs.get('epoch'),
                                                       units.timescale)
                else:
                    self.epoch = Time(kwargs.get('epoch'),
                                      format=units.timeformat,
                                      scale=units.timescale)
            else:
                self.epoch = Time(np.zeros(len(self.x)),
                                  format='jd',
                                  scale='utc')

            if kwargs.get('name') is not None:
                self.name = np.atleast_1d(kwargs.get('name'))
            else:
                # assign random, non-repeating hex-string names
                self.name = np.array(
                    [uuid.uuid4().hex for _ in range(len(self.x))])

        if kwargs.get('H') is not None:
            curves = kwargs.get('H')
            curve_funcs = []
            for curve in curves:
                if callable(curve):
                    curve_funcs.append(curve)
                else:
                    curve_funcs.append(lambda _, x=curve: x)
            self.H_func = np.array(curve_funcs)

            if kwargs.get('G') is not None:
                self.G = kwargs.get('G')
            else:
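                # 0.15 is the conventional default slope parameter of the
                # IAU H,G phase-curve system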
                self.G = np.repeat(0.15, len(self))

        if kwargs.get('mag') is not None:
            curves = kwargs.get('mag')
            curve_funcs = []
            for curve in curves:
                if callable(curve):
                    curve_funcs.append(curve)
                else:
                    curve_funcs.append(lambda _, x=curve: x)
            self.mag_func = np.array(curve_funcs)

        if kwargs.get('radius') is not None:
            self.radius = Distance(kwargs.get('radius'),
                                   units.size,
                                   allow_negative=False)

        if kwargs.get('mass') is not None:
            self.mass = kwargs.get('mass') * units.mass

        if kwargs.get('density') is not None:
            self.density = kwargs.get('density') * units.density
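
# The kwargs-normalisation loop at the top of __init__ reduces to this
# standalone idiom (a sketch, assuming only numpy; sample values are
# hypothetical): scalars and lists are coerced to 1-d arrays so downstream
# attributes are uniformly vectorised, with names kept as dtype=object.
import numpy as np

kwargs = {'a': 2.5, 'e': [0.1, 0.2], 'name': ['Ceres', 'Pallas']}
for key in [*kwargs]:
    k = kwargs[key]
    if not isinstance(k, np.ndarray):
        if isinstance(k, list):
            kwargs[key] = np.array(k, dtype=object if key == 'name' else None)
        else:
            kwargs[key] = np.atleast_1d(k)
print({key: value.shape for key, value in kwargs.items()})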
Example #25
def calculate_M31_Rgc_Wang2019(coordinates,
                               deproject=False,
                               debug=False,
                               M31_coord=coord.SkyCoord("0 42 44.3503",
                                                        "41 16 08.634",
                                                        frame="icrs",
                                                        equinox="J2000",
                                                        unit=(u.hourangle,
                                                              u.deg)),
                               M31_distance=Distance(780, unit=u.kpc)):
    # Preferred position is given by the NASA Extragalactic Database as
    # 00h42m44.3503s +41d16m08.634s Equatorial J2000.0  (2010ApJS..189...37E)
    #   M31 RA and Dec are also published by Kent (1989)
    # Distance McConnachie+ (2005), Conn+ (2012): 785 +/- 25 kpc; 779 +19/-18 kpc
    #   Alternatively, Freedman & Madore (1990) 773 +/- 36 kpc
    #   NED found 409 distance measurements /w mean 784 kpc, median 776 kpc.
    #   but in that table no homogenization or corrections have been applied.

    # Given coordinates
    test = coordinates[0]
    if not isinstance(coordinates, coord.SkyCoord):
        ra, dec = [], []
        for c in coordinates:
            ra.append(c.ra)
            dec.append(c.dec)
        coordinates = coord.SkyCoord(ra, dec)
    assert test.ra == coordinates[0].ra, "RA broke"
    assert test.dec == coordinates[0].dec, "DEC broke"

    alpha = coordinates.ra.radian
    delta = coordinates.dec.radian

    # M31
    alpha0 = M31_coord.ra.radian
    delta0 = M31_coord.dec.radian

    A = get_Wang2019_A(alpha, alpha0, delta)
    B = get_Wang2019_B(alpha, alpha0, delta, delta0)
    # Convert to arcmin because sqrt(X^2 + Y^2) cannot be taken while X and Y
    # are still Angle objects
    X = Angle(get_Wang2019_X(A, B), unit=u.radian).arcmin
    Y = Angle(get_Wang2019_Y(A, B), unit=u.radian).arcmin

    if debug:
        print("A = {0}".format(A))
        print("B = {0}".format(B))
        print("X = {0}".format(X))
        print("Y = {0}\n".format(Y))

    if deproject:
        # De-projected two-dimensional galactocentric radius.
        Rproj = apply_Wang2019_deprojection(X, Y)
    else:
        # Projected two-dimensional galactocentric radius.
        Rproj = numpy.sqrt(X**2 + Y**2)  # in arcmin

    X = Distance(
        Angle(X, unit=u.arcmin).radian * M31_distance,
        unit=M31_distance.unit,
        allow_negative=True,
    ).value
    Y = Distance(
        Angle(Y, unit=u.arcmin).radian * M31_distance,
        unit=M31_distance.unit,
        allow_negative=True,
    ).value
    Rproj = Distance(Angle(Rproj, unit=u.arcmin).radian * M31_distance,
                     unit=M31_distance.unit).value

    return X, Y, Rproj
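
# A self-contained sketch of the small-angle step used above (assuming the
# 780 kpc M31 distance adopted in the signature): an angular offset is
# converted to a projected physical distance via R = theta[rad] * D.
import astropy.units as u
from astropy.coordinates import Angle, Distance

theta = Angle(30, unit=u.arcmin)   # hypothetical angular offset from M31
d_M31 = Distance(780, unit=u.kpc)
R_proj = theta.radian * d_M31      # small-angle approximation
print(R_proj.to(u.kpc))            # ~6.8 kpc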
Example #26
    def luminosity(self):
        '''Integrates the SED that was fit to the data and calculates the
        IR luminosity of the galaxy.'''

        T_mcmc, beta_mcmc, log_A_mcmc = self.param_vals()
        l_nu = _c / (1000 * 1e-6)  # low-frequency integration bound (1000 micron)
        h_nu = _c / (15 * 1e-6)  # high-frequency integration bound (15 micron)

        int_flux, int_err = quad(bb_int,
                                 l_nu,
                                 h_nu,
                                 args=(T_mcmc[0], beta_mcmc[0],
                                       10**(log_A_mcmc[0])))
        int_flux = int_flux * 1e-26
        int_err = int_err * 1e-26
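        # Luminosity distance: Distance(z=...) defaults to Mpc under astropy's
        # default cosmology; 3.086e22 m per Mpc converts it to SI for
        # L = 4 * pi * d_L**2 * F below.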
        dl = Distance(z=self.z).value * 3.086 * 1e22
        L = 4 * np.pi * (dl**2) * int_flux
        #L_err = 4*np.pi*(dl**2)*int_err
        L_sun = constants.L_sun.value
        L_L_sun = L / L_sun
        #L_err_L_sun = L_err/L_sun

        # get luminosity error
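        # Monte Carlo propagation: draw 10000 parameter sets from normals whose
        # widths are the mean of the +/- fit errors, recompute L for each draw,
        # and take the sample standard deviation as the luminosity error.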
        T_e = (T_mcmc[1] + T_mcmc[2]) / 2
        beta_e = (beta_mcmc[1] + beta_mcmc[2]) / 2
        log_A_e = (log_A_mcmc[1] + log_A_mcmc[2]) / 2
        MC_Ts = np.random.normal(T_mcmc[0], T_e, 10000)
        MC_betas = np.random.normal(beta_mcmc[0], beta_e, 10000)
        MC_las = np.random.normal(log_A_mcmc[0], log_A_e, 10000)
        Ls_e = []
        for i in range(len(MC_Ts)):
            i_f, i_e = quad(bb_int,
                            l_nu,
                            h_nu,
                            args=(MC_Ts[i], MC_betas[i], 10**MC_las[i]))
            i_f = i_f * 1e-26
            L_e = 4 * np.pi * (dl**2) * i_f
            L_e_sun = L_e / L_sun
            Ls_e.append(L_e_sun)

        L_err = np.std(Ls_e)

        Ls = {}
        Ls['L_IR'] = L
        Ls['L_IR/L_sun'] = L_L_sun
        Ls['L_error'] = L_err

        ############################################
        '''int_wvs = np.arange(15,1000,0.001) * 1e-6
        int_nus = list(_c/int_wvs)
        int_nus.reverse()
        int_nus_1 = np.array(int_nus)
        int_fluxes = list(bb(int_wvs,T_mcmc[0],beta_mcmc[0],10**(log_A_mcmc[0])))
        int_fluxes.reverse()
        int_fluxes_1 = np.array(int_fluxes)

        integrated_flux = simps(int_fluxes_1,int_nus_1) * 1e-26

        dl = Distance(z = self.z).value * 3.086 * 1e22

        L = 4*np.pi*(dl**2)*integrated_flux

        L_sun  = constants.L_sun.value

        L_L_sun = L/L_sun
        
        # get errors of luminosity
        int_fluxes_p_e = list(bb(int_wvs,T_mcmc[0] + T_mcmc[1],beta_mcmc[0] + beta_mcmc[1],10**(log_A_mcmc[0] + log_A_mcmc[1])))
        int_fluxes_p_e.reverse()
        int_fluxes_p_e_1 = np.array(int_fluxes_p_e)
        
        integrated_flux_p_e = simps(int_fluxes_p_e_1,int_nus_1) * 1e-26
        
        L_p_e = 4*np.pi*(dl**2)*integrated_flux_p_e
        L_p_e_sun = L_p_e/L_sun
        
        int_fluxes_n_e = list(bb(int_wvs,T_mcmc[0] - T_mcmc[2],beta_mcmc[0] - beta_mcmc[2],10**(log_A_mcmc[0] - log_A_mcmc[2])))
        int_fluxes_n_e.reverse()
        int_fluxes_n_e_1 = np.array(int_fluxes_n_e)
        
        integrated_flux_n_e = simps(int_fluxes_n_e_1,int_nus_1) * 1e-26
        
        L_n_e = 4*np.pi*(dl**2)*integrated_flux_n_e
        L_n_e_sun = L_n_e/L_sun

        Ls = {}
        Ls['L_IR'] = L
        Ls['L_IR/L_sun'] = L_L_sun
        Ls['p_error'] = L_p_e_sun - L_L_sun
        Ls['n_error'] = L_L_sun - L_n_e_sun'''

        return Ls
Example #27
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.coordinates import Distance
from agnpy.emission_regions import Blob
from agnpy.utils.plot import load_mpl_rc

# matplotlib adjustments
load_mpl_rc()

# set the spectrum normalisation (total energy in electrons in this case)
spectrum_norm = 1e48 * u.Unit("erg")
# define the spectral function parametrisation through a dictionary
spectrum_dict = {
    "type": "PowerLaw",
    "parameters": {
        "p": 2.8,
        "gamma_min": 1e2,
        "gamma_max": 1e7
    },
}
# set the remaining quantities defining the blob
R_b = 1e16 * u.cm
B = 1 * u.G
z = Distance(1e27, unit=u.cm).z
delta_D = 10
Gamma = 10
blob = Blob(R_b, z, delta_D, Gamma, B, spectrum_norm, spectrum_dict)

# plot the electron distribution
blob.plot_n_e(gamma_power=2)
plt.show()
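
# The z = Distance(1e27, unit=u.cm).z line above inverts the luminosity
# distance under astropy's default cosmology; a standalone check (values are
# approximate and cosmology-dependent):
d = Distance(1e27, unit=u.cm)
print(d.to(u.Mpc), d.z)  # ~324 Mpc, z ~ 0.07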
Example #28
def test_distances():
    """
    Tests functionality for Coordinate class distances and cartesian
    transformations.
    """
    '''
    Distances can also be specified, and allow for a full 3D definition of a
    coordinate.
    '''

    # try all the different ways to initialize a Distance
    distance = Distance(12, u.parsec)
    Distance(40, unit=u.au)
    Distance(value=5, unit=u.kpc)

    # need to provide a unit
    with pytest.raises(u.UnitsError):
        Distance(12)

    # standard units are pre-defined
    npt.assert_allclose(distance.lyr, 39.138765325702551)
    npt.assert_allclose(distance.km, 370281309776063.0)

    # Coordinate objects can be assigned a distance object, giving them a full
    # 3D position
    c = Galactic(l=158.558650 * u.degree,
                 b=-43.350066 * u.degree,
                 distance=Distance(12, u.parsec))
    assert quantity_allclose(c.distance, 12 * u.pc)

    # or initialize distances via redshifts - this is actually tested in the
    # function below that checks for scipy. This is kept here as an example
    # c.distance = Distance(z=0.2)  # uses current cosmology
    # with whatever your preferred cosmology may be
    # c.distance = Distance(z=0.2, cosmology=WMAP5)

    # Coordinate objects can be initialized with a distance using special
    # syntax
    c1 = Galactic(l=158.558650 * u.deg,
                  b=-43.350066 * u.deg,
                  distance=12 * u.kpc)

    # Coordinate objects can be instantiated with cartesian coordinates
    # Internally they will immediately be converted to two angles + a distance
    cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
    c2 = Galactic(cart)

    sep12 = c1.separation_3d(c2)
    # returns a *3d* distance between the c1 and c2 coordinates (note that
    # this is a physical separation, not an angular one)
    assert isinstance(sep12, Distance)
    npt.assert_allclose(sep12.pc, 12005.784163916317, 10)
    '''
    All spherical coordinate systems with distances can be converted to
    cartesian coordinates.
    '''

    cartrep2 = c2.cartesian
    assert isinstance(cartrep2.x, u.Quantity)
    npt.assert_allclose(cartrep2.x.value, 2)
    npt.assert_allclose(cartrep2.y.value, 4)
    npt.assert_allclose(cartrep2.z.value, 8)

    # with no distance, the unit sphere is assumed when converting to cartesian
    c3 = Galactic(l=158.558650 * u.degree,
                  b=-43.350066 * u.degree,
                  distance=None)
    unitcart = c3.cartesian
    npt.assert_allclose(
        ((unitcart.x**2 + unitcart.y**2 + unitcart.z**2)**0.5).value, 1.0)

    # TODO: choose between these when CartesianRepresentation gets a definite
    # decision on whether or not it gets __add__
    #
    # CartesianRepresentation objects can be added and subtracted, which are
    # vector/elementwise they can also be given as arguments to a coordinate
    # system
    # csum = ICRS(c1.cartesian + c2.cartesian)
    csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
    csum = ICRS(csumrep)

    npt.assert_allclose(csumrep.x.value, -8.12016610185)
    npt.assert_allclose(csumrep.y.value, 3.19380597435)
    npt.assert_allclose(csumrep.z.value, -8.2294483707)
    npt.assert_allclose(csum.ra.degree, 158.529401774)
    npt.assert_allclose(csum.dec.degree, -43.3235825777)
    npt.assert_allclose(csum.distance.kpc, 11.9942200501)
Example #29

    def main(self):

        # print(mp.cpu_count())

        prep_start = time.time()

        is_error = False

        # Parameter checks
        if self.options.gw_id == "":
            is_error = True
            print("GWID is required.")

        if self.options.healpix_file == "":
            is_error = True
            print("Healpix file is required.")

        if is_error:
            print("Exiting...")
            return 1

        formatted_healpix_dir = self.options.healpix_dir
        if "{GWID}" in formatted_healpix_dir:
            formatted_healpix_dir = formatted_healpix_dir.replace(
                "{GWID}", self.options.gw_id)

        formatted_model_output_dir = self.options.model_output_dir
        if "{GWID}" in formatted_model_output_dir:
            formatted_model_output_dir = formatted_model_output_dir.replace(
                "{GWID}", self.options.gw_id)

        hpx_path = "%s/%s" % (formatted_healpix_dir, self.options.healpix_file)
        model_path = "../LinearModels"

        model_files = []
        for file in os.listdir(model_path):
            if file.endswith(".dat"):
                model_files.append("%s/%s" % (model_path, file))

        if len(model_files) <= 0:
            is_error = True
            print("There are no models to process.")

        # Check if the above files exist...
        if not os.path.exists(hpx_path):
            is_error = True
            print("Healpix file `%s` does not exist." % hpx_path)

        if is_error:
            print("Exiting...")
            return 1

        # CONVENIENCE DICTIONARIES
        # Band abbreviation, band_id mapping
        band_mapping_new = {
            "sdss_g": "SDSS g",
            "sdss_r": "SDSS r",
            "sdss_i": "SDSS i",
            "Clear": "Clear"
        }

        reverse_band_mapping_new = {
            "SDSS g": "sdss_g",
            "SDSS r": "sdss_r",
            "SDSS i": "sdss_i",
            "Clear": "Clear"
        }

        detector_mapping = {
            "s": "SWOPE",
            "t": "THACHER",
            "a": "ANDICAM",
            "n": "NICKEL",
            "m": "MOSFIRE",
            "k": "KAIT",
            "si": "SINISTRO"
        }

        # LOADING NSIDE 128 SKY PIXELS AND EBV INFORMATION
        print("\nLoading NSIDE 128 pixels...")
        nside128 = 128
        N128_dict = None
        with open('N128_dict.pkl', 'rb') as handle:
            N128_dict = pickle.load(handle)
        del handle

        print("\nLoading existing EBV...")
        ebv = None
        with open('ebv.pkl', 'rb') as handle:
            ebv = pickle.load(handle)

        models = {}
        for index, mf in enumerate(model_files):
            model_table = Table.read(mf, format='ascii.ecsv')
            mask = model_table['time'] > 0.0

            model_time = np.asarray(model_table['time'][mask])
            g = np.asarray(model_table['sdss_g'][mask])
            r = np.asarray(model_table['sdss_r'][mask])
            i = np.asarray(model_table['sdss_i'][mask])
            clear = np.asarray(model_table['Clear'][mask])

            model_props = model_table.meta['comment']
            M = float(model_props[0].split("=")[1])
            dM = float(model_props[1].split("=")[1])

            base_name = os.path.basename(mf)
            print("Loading `%s`" % base_name)
            models[(M, dM)] = {
                'time': model_time,
                'sdss_g': g,
                'sdss_r': r,
                'sdss_i': i,
                'Clear': clear
            }

        # Get Map ID
        print("\nLoading Healpix Map...")
        healpix_map_select = "SELECT id, NSIDE FROM HealpixMap WHERE GWID = '%s' and Filename = '%s'"
        healpix_map_id = int(
            query_db([
                healpix_map_select %
                (self.options.gw_id, self.options.healpix_file)
            ])[0][0][0])
        healpix_map_nside = int(
            query_db([
                healpix_map_select %
                (self.options.gw_id, self.options.healpix_file)
            ])[0][0][1])

        # Get Bands
        print("\nLoading Configured Bands...")
        band_select = "SELECT id, Name, F99_Coefficient FROM Band"
        bands = query_db([band_select])[0]
        band_dict_by_name = {}
        band_dict_by_id = {}
        for b in bands:
            b_id = int(b[0])
            b_name = b[1]
            b_coeff = float(b[2])

            band_dict_by_name[b_name] = (b_id, b_name, b_coeff)
            band_dict_by_id[b_id] = (b_id, b_name, b_coeff)

        print("\nRetrieving distinct, imaged map pixels")
        map_pixel_select = '''
        SELECT 
            DISTINCT hp.id, 
            hp.HealpixMap_id, 
            hp.Pixel_Index, 
            hp.Prob, 
            hp.Distmu, 
            hp.Distsigma, 
            hp.Distnorm, 
            hp.Mean, 
            hp.Stddev, 
            hp.Norm, 
            sp.Pixel_Index as N128_Pixel_Index 
        FROM 
            HealpixPixel hp 
        JOIN ObservedTile_HealpixPixel ot_hp on ot_hp.HealpixPixel_id = hp.id 
        JOIN ObservedTile ot on ot.id = ot_hp.ObservedTile_id 
        JOIN SkyPixel sp on sp.id = hp.N128_SkyPixel_id 
        WHERE
            ot.HealpixMap_id = %s and 
            ot.Mag_Lim IS NOT NULL 
        '''

        q = map_pixel_select % healpix_map_id
        map_pixels = query_db([q])[0]
        print("Retrieved %s map pixels..." % len(map_pixels))

        # Initialize map pix dict for later access
        map_pixel_dict_new = OrderedDict()

        # map_pixel_dict_old = OrderedDict()
        class Pixel_Synopsis():
            def __init__(self, mean_dist, dist_sigma, prob_2D, pixel_index,
                         N128_index, pix_ebv, z):
                self.mean_dist = mean_dist
                self.dist_sigma = dist_sigma
                self.prob_2D = prob_2D
                self.pixel_index = pixel_index
                self.N128_index = N128_index
                self.pix_ebv = pix_ebv
                self.z = z

                self.distance_arr = None
                self.distance_modulus_arr = None
                self.distance_probs = None

                # From the tiles that contain this pixel
                # band:value
                self.measured_bands = []
                self.lim_mags = OrderedDict()
                self.delta_mjds = OrderedDict()

                # From the model (only select the bands that have been imaged)
                self.A_lambda = OrderedDict()  # band:value

                # model:arr
                self.model_observer_time_arr_new = OrderedDict()
                # self.model_observer_time_arr_old = None

                # Computed based on model + tile + pixel info
                # band:value
                # self.app_mag_model = {} # x-val
                # self.app_mag_model_prob = {} # y-val

                # Final calculation
                # model:band:value
                self.best_integrated_probs = OrderedDict()

            def __str__(self):
                return str(self.__dict__)

        count_bad_pixels = 0
        for p in map_pixels:
            mean_dist = float(p[7])
            dist_sigma = float(p[8])
            prob_2D = float(p[3])
            pixel_index = int(p[2])
            N128_pixel_index = int(p[10])
            pix_ebv = ebv[N128_pixel_index]

            if mean_dist == 0.0:
                # distance did not converge for this pixel; skip it
                print("Bad Index: %s" % pixel_index)
                count_bad_pixels += 1
                continue

            d = Distance(mean_dist, u.Mpc)
            z = d.compute_z(cosmology=cosmo)

            p_new = Pixel_Synopsis(mean_dist, dist_sigma, prob_2D, pixel_index,
                                   N128_pixel_index, pix_ebv, z)

            min_dist = mean_dist - 5.0 * dist_sigma
            if min_dist <= 0.0:
                min_dist = 0.001
            max_dist = mean_dist + 5.0 * dist_sigma

            distance_arr = np.linspace(min_dist, max_dist, 100)
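            # Distance modulus mu = 5 log10(d / 10 pc); distance_arr is in
            # Mpc, hence the factor of 1e6 pc per Mpc.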
            distance_modulus_arr = 5.0 * np.log10(distance_arr * 1e+6) - 5.0
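            # Gaussian PDF of the line-of-sight distance, N(mean_dist, dist_sigma)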
            distance_probs = 1.0 / np.sqrt(
                2.0 * np.pi * dist_sigma**2) * np.exp(
                    -1.0 * (distance_arr - mean_dist)**2 / (2 * dist_sigma**2))

            p_new.distance_arr = distance_arr
            p_new.distance_modulus_arr = distance_modulus_arr
            p_new.distance_probs = distance_probs

            map_pixel_dict_new[pixel_index] = p_new

        print("\nMap pixel dict complete. %s bad pixels." % count_bad_pixels)

        # Get Detectors
        detectors = []
        print("\nLoading Swope...")
        detector_select = "SELECT id, Name, Deg_width, Deg_width, Deg_radius, Area, MinDec, MaxDec FROM Detector WHERE `Name`='%s'"
        dr = query_db([detector_select % 'SWOPE'])[0][0]
        swope = Detector(dr[1],
                         float(dr[2]),
                         float(dr[3]),
                         detector_id=int(dr[0]))
        detectors.append(swope)

        print("\nLoading Thacher...")
        dr = query_db([detector_select % 'THACHER'])[0][0]
        thacher = Detector(dr[1],
                           float(dr[2]),
                           float(dr[3]),
                           detector_id=int(dr[0]))
        detectors.append(thacher)

        print("\nLoading Nickel...")
        dr = query_db([detector_select % 'NICKEL'])[0][0]
        nickel = Detector(dr[1],
                          float(dr[2]),
                          float(dr[3]),
                          detector_id=int(dr[0]))
        detectors.append(nickel)

        print("\nLoading KAIT...")
        dr = query_db([detector_select % 'KAIT'])[0][0]
        kait = Detector(dr[1],
                        float(dr[2]),
                        float(dr[3]),
                        detector_id=int(dr[0]))
        detectors.append(kait)

        print("\nLoading SINISTRO...")
        dr = query_db([detector_select % 'SINISTRO'])[0][0]
        sinistro = Detector(dr[1],
                            float(dr[2]),
                            float(dr[3]),
                            detector_id=int(dr[0]))
        detectors.append(sinistro)

        # Get and instantiate all observed tiles
        observed_tile_select = '''
            SELECT 
                id,
                Detector_id, 
                FieldName, 
                RA, 
                _Dec, 
                EBV, 
                N128_SkyPixel_id, 
                Band_id, 
                MJD, 
                Exp_Time, 
                Mag_Lim, 
                HealpixMap_id 
            FROM
                ObservedTile 
            WHERE
                HealpixMap_id = %s and 
                Detector_id = %s and 
                Mag_Lim IS NOT NULL 
        '''

        observed_tiles = []

        print("\nLoading Swope's Observed Tiles...")
        ot_result = query_db(
            [observed_tile_select % (healpix_map_id, swope.id)])[0]
        for ot in ot_result:
            t = Tile(float(ot[3]),
                     float(ot[4]),
                     swope.deg_width,
                     swope.deg_height,
                     healpix_map_nside,
                     tile_id=int(ot[0]))
            t.field_name = ot[2]
            t.mjd = float(ot[8])
            t.mag_lim = float(ot[10])
            t.band_id = int(ot[7])

            observed_tiles.append(t)
        print("Loaded %s %s tiles..." % (len(ot_result), swope.name))

        print("\nLoading Nickel's Observed Tiles...")
        ot_result = query_db(
            [observed_tile_select % (healpix_map_id, nickel.id)])[0]
        for ot in ot_result:
            t = Tile(float(ot[3]),
                     float(ot[4]),
                     nickel.deg_width,
                     nickel.deg_height,
                     healpix_map_nside,
                     tile_id=int(ot[0]))
            t.field_name = ot[2]
            t.mjd = float(ot[8])
            t.mag_lim = float(ot[10])
            t.band_id = int(ot[7])

            observed_tiles.append(t)
        print("Loaded %s %s tiles..." % (len(ot_result), nickel.name))

        print("\nLoading Thacher's Observed Tiles...")
        ot_result = query_db(
            [observed_tile_select % (healpix_map_id, thacher.id)])[0]
        for ot in ot_result:
            t = Tile(float(ot[3]),
                     float(ot[4]),
                     thacher.deg_width,
                     thacher.deg_height,
                     healpix_map_nside,
                     tile_id=int(ot[0]))
            t.field_name = ot[2]
            t.mjd = float(ot[8])
            t.mag_lim = float(ot[10])
            t.band_id = int(ot[7])

            observed_tiles.append(t)
        print("Loaded %s %s tiles..." % (len(ot_result), thacher.name))

        print("\nLoading KAIT's Observed Tiles...")
        ot_result = query_db(
            [observed_tile_select % (healpix_map_id, kait.id)])[0]
        for ot in ot_result:
            t = Tile(float(ot[3]),
                     float(ot[4]),
                     kait.deg_width,
                     kait.deg_height,
                     healpix_map_nside,
                     tile_id=int(ot[0]))
            t.field_name = ot[2]
            t.mjd = float(ot[8])
            t.mag_lim = float(ot[10])
            t.band_id = int(ot[7])
            observed_tiles.append(t)
        print("Loaded %s %s tiles..." % (len(ot_result), kait.name))

        print("\nLoading SINISTRO's Observed Tiles...")
        ot_result = query_db(
            [observed_tile_select % (healpix_map_id, sinistro.id)])[0]
        for ot in ot_result:
            t = Tile(float(ot[3]),
                     float(ot[4]),
                     sinistro.deg_width,
                     sinistro.deg_height,
                     healpix_map_nside,
                     tile_id=int(ot[0]))
            t.field_name = ot[2]
            t.mjd = float(ot[8])
            t.mag_lim = float(ot[10])
            t.band_id = int(ot[7])
            observed_tiles.append(t)
        print("Loaded %s %s tiles..." % (len(ot_result), sinistro.name))

        print("Getting Detector-Band pairs...")
        detector_band_result = query_db([
            '''SELECT 
                DISTINCT d.Name as Detector, b.Name as Band
            FROM Detector d 
            JOIN ObservedTile ot on ot.Detector_id = d.id
            JOIN Band b on b.id = ot.Band_id
            WHERE ot.HealpixMap_id = %s''' % healpix_map_id
        ])

        print("\nUpdating pixel `delta_mjds` and `lim_mags`...")
        # For each tile:
        #   add its observation MJD to each enclosed pixel's MJD collection,
        #   and its limiting mag to that pixel's limiting-mag collection.
        for t in observed_tiles:
            pix_indices = t.enclosed_pixel_indices

            for i in pix_indices:

                # get band from id...
                band = band_dict_by_id[t.band_id]
                band_name = band[1]

                # Some pixels are omitted because their distance information did not converge
                if i not in map_pixel_dict_new:
                    continue

                pix_synopsis_new = map_pixel_dict_new[i]

                if band_name not in pix_synopsis_new.measured_bands:
                    pix_synopsis_new.measured_bands.append(band_name)
                    pix_synopsis_new.delta_mjds[band_name] = {}
                    pix_synopsis_new.lim_mags[band_name] = {}

                pix_synopsis_new.delta_mjds[band_name][t.mjd] = (t.mjd -
                                                                 GW190814_t_0)
                pix_synopsis_new.lim_mags[band_name][t.mjd] = (t.mag_lim)

        print("\nInitializing %s models..." % len(models))

        print("\nUpdating pixel `A_lambda` and `model_observer_time_arr`...")
        # Set pixel `model_observer_time_arr` and `A_lambda`
        for pix_index, pix_synopsis in map_pixel_dict_new.items():

            for model_param_tuple, model_dict in models.items():
                for model_col in model_dict.keys():
                    if model_col in band_mapping_new:

                        band = band_dict_by_name[band_mapping_new[model_col]]
                        band_id = band[0]
                        band_name = band[1]
                        band_coeff = band[2]

                        if band_name in pix_synopsis.measured_bands:
                            if band_name not in pix_synopsis.A_lambda:
                                pix_a_lambda = pix_synopsis.pix_ebv * band_coeff
                                pix_synopsis.A_lambda[band_name] = pix_a_lambda

                time_dilation = 1.0 + pix_synopsis.z
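                # Cosmological time dilation: observer-frame light-curve times
                # are (1 + z) times the rest-frame model times.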
                pix_synopsis.model_observer_time_arr_new[
                    model_param_tuple] = model_dict["time"] * time_dilation

        prep_end = time.time()
        print("Prep time: %s [seconds]" % (prep_end - prep_start))

        compute_start = time.time()
        # NEW Do the calculation...
        print("\nIntegrating total model probs...")
        count = 0

        pix_models = []

        for pix_index, pix_synopsis in map_pixel_dict_new.items():

            for band in pix_synopsis.measured_bands:

                for model_param_tuple, model_dict in models.items():

                    model_abs_mags = model_dict[reverse_band_mapping_new[band]]
                    rise = model_abs_mags[-1] - model_abs_mags[0]
                    run = pix_synopsis.model_observer_time_arr_new[model_param_tuple][-1] - \
                          pix_synopsis.model_observer_time_arr_new[model_param_tuple][0]
                    slope = rise / run
                    intercept = model_abs_mags[0]
                    pixel_delta_mjd = pix_synopsis.delta_mjds[band]

                    dist_mod_arr = pix_synopsis.distance_modulus_arr
                    mwe = pix_synopsis.A_lambda[band]
                    prob_2D = pix_synopsis.prob_2D
                    distance_probs = pix_synopsis.distance_probs

                    for i, (mjd,
                            delta_mjd) in enumerate(pixel_delta_mjd.items()):

                        dmjd = copy.deepcopy(delta_mjd)
                        pix_key = (pix_index, model_param_tuple, band, mjd)
                        pix_models.append(
                            (pix_key, slope, intercept, dist_mod_arr, mwe,
                             prob_2D, distance_probs, dmjd,
                             pix_synopsis.lim_mags[band][mjd]))

                        # # get the corresponding abs mag for the time of the observation
                        # # pix_abs_mag = np.interp(delta_mjd, pix_synopsis.model_observer_time_arr_new[model_param_tuple],
                        # #                         model_abs_mags)
                        #
                        # # This only works because these are linear
                        # rise = model_abs_mags[-1] - model_abs_mags[0]
                        # run = pix_synopsis.model_observer_time_arr_new[model_param_tuple][-1] - \
                        #       pix_synopsis.model_observer_time_arr_new[model_param_tuple][0]
                        # slope = rise / run
                        # intercept = model_abs_mags[0]
                        # # pix_abs_mag = slope * delta_mjd + intercept
                        # pix_abs_mag = 0.02
                        #
                        # # print("pix_abs_mag: %s" % pix_abs_mag)
                        #
                        # # compute the distribution in apparent mag
                        # pix_app_mag = np.asarray(pix_synopsis.distance_modulus_arr) + pix_abs_mag + \
                        #               pix_synopsis.A_lambda[band]
                        #
                        # # re-normalize this distribution to sum to the pixel 2D prob
                        # # SIMPS (y, x)
                        # # app_mag_norm = simps(pix_synopsis.distance_probs, pix_app_mag)
                        # # app_mag_norm = trapz(pix_synopsis.distance_probs, pix_app_mag)
                        # app_mag_norm = 0.02
                        #
                        # renorm_pix_app_mag_prob = np.asarray(
                        #     (pix_synopsis.prob_2D / app_mag_norm) * pix_synopsis.distance_probs)
                        #
                        # # Integrate the app mag distribution from arbitrarily bright to the limiting magnitude
                        # # f_interp = lambda x: np.interp(x, pix_app_mag, renorm_pix_app_mag_prob)
                        # # f_interp = interp1d(pix_app_mag, renorm_pix_app_mag_prob)
                        # lower_bound = np.min(pix_app_mag)
                        # upper_bound = np.max(pix_app_mag)
                        # skip_integral = False
                        # prob_to_detect = 0.0
                        #
                        if model_param_tuple not in pix_synopsis.best_integrated_probs:
                            pix_synopsis.best_integrated_probs[
                                model_param_tuple] = {}

                        try:
                            pix_synopsis.best_integrated_probs[
                                model_param_tuple][band][mjd] = 0.0
                        except KeyError:
                            pix_synopsis.best_integrated_probs[
                                model_param_tuple][band] = {
                                    mjd: 0.0
                                }
                        #
                        #
                        # if pix_synopsis.lim_mags[band][mjd] < lower_bound:
                        #     # This limiting mag could not detect the model at this time in this band, since it's
                        #     # brightest value (lower_bound) was dimmer than the limiting mag
                        #     skip_integral = True
                        # elif pix_synopsis.lim_mags[band][mjd] > upper_bound:
                        #     # CAUTION: we need to keep our bounds of integration within the app mag gaussian
                        #     # if the lim_mag > max(pix_app_mag), go with max(pix_app_mag), i.e. if limiting mag is
                        #     # dimmer than the model goes, we would have seen the model
                        #     # else if the model goes dimmer, use the lim_mag as an integration bound
                        #     upper_bound = min(np.max(pix_app_mag), pix_synopsis.lim_mags[band][mjd])
                        #
                        # if not skip_integral:
                        #     index_to_integrate_to = max(np.argwhere(pix_app_mag <= upper_bound))[0]
                        #     # prob_to_detect = simps(renorm_pix_app_mag_prob[0:index_to_integrate_to],
                        #     #                        pix_app_mag[0:index_to_integrate_to])
                        #     # prob_to_detect = trapz(renorm_pix_app_mag_prob[0:index_to_integrate_to],
                        #     #                        pix_app_mag[0:index_to_integrate_to])
                        #     prob_to_detect = 0.02
                        #
                        # if not np.isnan(prob_to_detect) and prob_to_detect >= 0.0:
                        #     pix_synopsis.best_integrated_probs[model_param_tuple][band][mjd] = prob_to_detect
            count += 1
            if count % 1000 == 0:
                print("Processed: %s" % count)

        compute_end = time.time()
        print("Compute time: %s [seconds]" % (compute_end - compute_start))

        print("Starting pool integrate...")
        pool_start = time.time()

        # DEBUG
        # for p in pix_models:
        #     integrate_pixel(p)

        pool = mp.Pool(6)
        integrated_pixels = pool.map(integrate_pixel,
                                     pix_models,
                                     chunksize=500)
        for ip in integrated_pixels:
            pix_key = ip[0]
            pix_index = pix_key[0]
            model_param_tuple = pix_key[1]
            band = pix_key[2]
            mjd = pix_key[3]
            prob = ip[1]

            map_pixel_dict_new[pix_index].best_integrated_probs[
                model_param_tuple][band][mjd] = prob

        pool_end = time.time()
        print("Pool time: %s [seconds]" % (pool_end - pool_start))

        # NEW
        # Finally, get the highest valued integration, and sum
        running_sums = {}  # model:band:value
        # pixels_to_plot = {} # model:band:pixel

        for pix_index, pix_synopsis in map_pixel_dict_new.items():
            for band in pix_synopsis.measured_bands:
                for model_param_tuple, model_dict in models.items():

                    pix_max = 0.0
                    if model_param_tuple not in running_sums:
                        running_sums[model_param_tuple] = {}

                    if band not in running_sums[model_param_tuple]:
                        running_sums[model_param_tuple][band] = 0.0

                    probs = []
                    for mjd, integrated_prob in pix_synopsis.best_integrated_probs[
                            model_param_tuple][band].items():
                        probs.append(integrated_prob)

                    pix_max = np.max(probs)
                    running_sums[model_param_tuple][band] += pix_max

        # pixels_to_plot[] append(Pixel_Element(pix_index, healpix_map_nside, pix_max))

        for model_param_tuple, band_dict in running_sums.items():
            print("\nIntegrated prob to detect model (M=%s, dM=%s)" %
                  model_param_tuple)

            for band, running_sum in band_dict.items():
                print("\t%s: %s" % (band, running_sum))

        ## Additional calculation -- for every pixel, just get the highest prob
        running_sums2 = {}
        for model_param_tuple, model_dict in models.items():

            running_sums2[model_param_tuple] = 0.0

            for pix_index, pix_synopsis in map_pixel_dict_new.items():

                pix_max = 0.0
                probs = []

                for band in pix_synopsis.measured_bands:
                    for mjd, integrated_prob in pix_synopsis.best_integrated_probs[
                            model_param_tuple][band].items():
                        probs.append(integrated_prob)

                pix_max = np.max(probs)
                running_sums2[model_param_tuple] += pix_max

        # Build ascii.ecsv formatted output
        cols = ['M', 'dM', 'Prob']
        dtype = ['f8', 'f8', 'f8']
        result_table = Table(dtype=dtype, names=cols)

        for model_param_tuple, prob in running_sums2.items():
            print("\nCombined Integrated prob to detect model (M=%s, dM=%s)" %
                  model_param_tuple)
            print("\t%s" % prob)
            result_table.add_row(
                [model_param_tuple[0], model_param_tuple[1], prob])

        result_table.write("%s/Detection_Results_Linear.prob" %
                           formatted_model_output_dir,
                           overwrite=True,
                           format='ascii.ecsv')
Example #30
    def calcDistanceParsec(self, redshift):
        distance = Distance(z=redshift, allow_negative=True).to(u.parsec).value
        return distance
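
# A quick check of the same conversion (a sketch; the exact value depends on
# astropy's default cosmology):
import astropy.units as u
from astropy.coordinates import Distance

print(Distance(z=0.1).to(u.parsec).value)  # ~4.7e8 pc for Planck-like parameters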