Example #1
    def transform_lonlats(self, lons, lats):
        R = 6370997.0
        x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
        y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
        z_coords = R * da.sin(da.deg2rad(lats))

        return da.stack((x_coords, y_coords, z_coords), axis=-1)
Example #2
def lonlat2xyz(lons, lats):
    """Convert lons and lats to cartesian coordinates."""
    R = 6370997.0
    x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
    y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
    z_coords = R * da.sin(da.deg2rad(lats))
    return x_coords, y_coords, z_coords
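A minimal usage sketch for lonlat2xyz above (not part of the original example), assuming dask.array is imported as da; the coordinate values are made up for illustration.

import numpy as np
import dask.array as da

# Hypothetical 2 x 2 grids of longitudes and latitudes in degrees
lons = da.from_array(np.array([[0.0, 45.0], [90.0, 180.0]]), chunks=(2, 2))
lats = da.from_array(np.array([[0.0, 30.0], [-45.0, 60.0]]), chunks=(2, 2))

x, y, z = lonlat2xyz(lons, lats)
# Each output is a lazy dask array with the same shape as the inputs
print(x.compute())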
Example #3
def lonlat2xyz(lons, lats):
    """Convert lons and lats to cartesian coordinates."""
    R = 6370997.0
    x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
    y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
    z_coords = R * da.sin(da.deg2rad(lats))
    return x_coords, y_coords, z_coords
Example #4
    def get_angle_info(vza, sza, raa, m_pi):
        """
        Gets the angle information
        """

        AngleInfo = namedtuple('AngleInfo',
                               'vza sza raa vza_rad sza_rad raa_rad')

        # View zenith angle
        vza_rad = da.deg2rad(vza)

        # Solar zenith angle
        sza_rad = da.deg2rad(sza)

        # Relative azimuth angle
        raa_rad = da.deg2rad(raa)

        vza_abs = da.fabs(vza_rad)
        sza_abs = da.fabs(sza_rad)

        raa_abs = da.where((vza_rad < 0) | (sza_rad < 0), m_pi, raa_rad)

        return AngleInfo(vza=vza,
                         sza=sza,
                         raa=raa,
                         vza_rad=vza_abs,
                         sza_rad=sza_abs,
                         raa_rad=raa_abs)
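A hedged usage sketch for get_angle_info above, assuming it is reachable as a plain function (it takes no self) and that namedtuple has been imported from collections in the same module; the angle arrays are illustrative only.

import numpy as np
import dask.array as da
from collections import namedtuple

vza = da.from_array(np.array([10.0, -20.0]), chunks=2)   # view zenith angles (degrees)
sza = da.from_array(np.array([30.0, 40.0]), chunks=2)    # solar zenith angles (degrees)
raa = da.from_array(np.array([90.0, 120.0]), chunks=2)   # relative azimuth angles (degrees)

info = get_angle_info(vza, sza, raa, np.pi)
# Radian fields hold absolute values; raa_rad falls back to pi where vza or sza is negative
print(info.vza_rad.compute(), info.raa_rad.compute())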
Example #5
def lonlat2xyz(lons, lats):

    R = 6370997.0
    x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
    y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
    z_coords = R * da.sin(da.deg2rad(lats))

    return da.stack(
        (x_coords.ravel(), y_coords.ravel(), z_coords.ravel()), axis=-1)
Example #6
def lonlat2xyz(lons, lats):
    """Convert geographic coordinates to cartesian 3D coordinates."""
    R = 6370997.0
    x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
    y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
    z_coords = R * da.sin(da.deg2rad(lats))

    return da.stack(
        (x_coords.ravel(), y_coords.ravel(), z_coords.ravel()), axis=-1)
Example #7
def lonlat2xyz(lons, lats):

    R = 6370997.0
    x_coords = R * da.cos(da.deg2rad(lats)) * da.cos(da.deg2rad(lons))
    y_coords = R * da.cos(da.deg2rad(lats)) * da.sin(da.deg2rad(lons))
    z_coords = R * da.sin(da.deg2rad(lats))

    return da.stack((x_coords.ravel(), y_coords.ravel(), z_coords.ravel()),
                    axis=-1)
Example #8
def cos_zen(utc_time, lon, lat):
    """Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*.
    utc_time: datetime.datetime instance of the UTC time
    lon and lat in degrees.
    """
    lon = da.deg2rad(lon)
    lat = da.deg2rad(lat)

    r_a, dec = sun_ra_dec(utc_time)
    h__ = _local_hour_angle(utc_time, lon, r_a)
    return (da.sin(lat) * da.sin(dec) +
            da.cos(lat) * da.cos(dec) * da.cos(h__))
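As a quick numeric illustration of the identity implemented above, cos(theta_z) = sin(lat)*sin(dec) + cos(lat)*cos(dec)*cos(h), with made-up declination and hour-angle values standing in for the outputs of sun_ra_dec and _local_hour_angle:

import numpy as np
import dask.array as da

lat = da.deg2rad(da.from_array(np.array([0.0, 45.0]), chunks=2))  # latitudes in radians
dec = np.deg2rad(23.44)   # hypothetical solar declination (around a solstice)
h = np.deg2rad(15.0)      # hypothetical local hour angle (one hour past local noon)

cos_zen_value = da.sin(lat) * np.sin(dec) + da.cos(lat) * np.cos(dec) * np.cos(h)
print(cos_zen_value.compute())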
Example #9
def haversines(x1, x2, y1, y2, z1=None, z2=None):

    x1, x2 = da.deg2rad(x1), da.deg2rad(x2)
    y1, y2 = da.deg2rad(y1), da.deg2rad(y2)

    x = (x2 - x1) * da.cos((y1 + y2) * 0.5) * cst.r_earth
    y = (y2 - y1) * cst.r_earth * da.ones_like(x1) * da.ones_like(x2)

    if z1 is None or z2 is None:
        return da.stack((x, y), axis=-1)
    else:
        z1 = da.where(da.isnan(z1), 0, z1)
        z2 = da.where(da.isnan(z2), 0, z2)
        z = (z2 - z1) * da.ones_like(x)
        return da.stack((x, y, z), axis=-1)
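A usage sketch for haversines above: cst comes from the surrounding module, so if the function is pasted into a standalone script a stand-in constant object is needed first; the Earth-radius value and metre units below are assumptions.

import types
import numpy as np
import dask.array as da

cst = types.SimpleNamespace(r_earth=6371000.0)  # hypothetical stand-in for the module-level cst

x1 = da.from_array(np.array([0.0, 10.0]), chunks=2)     # start longitudes (degrees)
x2 = da.from_array(np.array([0.5, 10.5]), chunks=2)     # end longitudes (degrees)
y1 = da.from_array(np.array([45.0, -20.0]), chunks=2)   # start latitudes (degrees)
y2 = da.from_array(np.array([45.5, -20.5]), chunks=2)   # end latitudes (degrees)

offsets = haversines(x1, x2, y1, y2)   # shape (2, 2): approximate east/north offsets
print(offsets.compute())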
Example #10
def get_alt_az(utc_time, lon, lat):
    """Return sun altitude and azimuth from *utc_time*, *lon*, and *lat*.
    lon,lat in degrees
    What is the unit of the returned angles and heights!? FIXME!
    """
    lon = da.deg2rad(lon)
    lat = da.deg2rad(lat)

    ra_, dec = sun_ra_dec(utc_time)
    h__ = _local_hour_angle(utc_time, lon, ra_)
    return (da.arcsin(
        da.sin(lat) * np.sin(dec) + da.cos(lat) * np.cos(dec) * np.cos(h__)),
            da.arctan2(
                -np.sin(h__),
                (da.cos(lat) * np.tan(dec) - da.sin(lat) * np.cos(h__))))
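A note on the FIXME above: since da.arcsin and da.arctan2 both return radians, the altitude and azimuth come back in radians, and a caller that wants degrees can wrap the two results in da.rad2deg.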
Example #11
    def instantaneous_frequency(self, darray, sample_rate=4, preview=None):
        """
        Description
        -----------
        Compute the Instantaneous Frequency of the input data
        
        Parameters
        ----------
        darray : Array-like, acceptable inputs include Numpy, HDF5, or Dask Arrays
        
        Keyword Arguments
        -----------------  
        sample_rate : Number, sample rate in milliseconds (ms)
        preview : str, enables or disables preview mode and specifies direction
            Acceptable inputs are (None, 'inline', 'xline', 'z')
            Optimizes chunk size in different orientations to facilitate rapid
            screening of algorithm output
        
        Returns
        -------
        result : Dask Array
        """

        darray, chunks_init = self.create_array(darray, preview=preview)

        fs = 1000 / sample_rate
        phase = self.instantaneous_phase(darray)
        phase = da.deg2rad(phase)
        phase = phase.map_blocks(np.unwrap, dtype=darray.dtype)
        phase_prime = sp().first_derivative(phase, axis=-1)
        result = da.absolute((phase_prime / (2.0 * np.pi) * fs))

        return (result)
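An illustrative stand-alone recast of the core computation above (an assumption, not the original class method): da.gradient stands in for sp().first_derivative, and the array is assumed not to be chunked along the last (time) axis so np.unwrap works per block.

import numpy as np
import dask.array as da

def instantaneous_frequency_sketch(phase_deg, sample_rate=4):
    """Instantaneous frequency (Hz) from a phase array given in degrees.

    sample_rate is the sample interval in milliseconds, as in the docstring
    above, so fs is the sampling frequency in Hz.
    """
    fs = 1000 / sample_rate
    phase = da.deg2rad(phase_deg)
    phase = phase.map_blocks(np.unwrap, dtype=phase.dtype)
    phase_prime = da.gradient(phase, axis=-1)  # first derivative along the trace axis
    return da.absolute(phase_prime / (2.0 * np.pi) * fs)

# A 50 Hz sinusoid sampled every 4 ms should give ~50 Hz everywhere
t = np.arange(0, 1, 0.004)
phase_deg = da.from_array(np.rad2deg(2 * np.pi * 50 * t)[np.newaxis, :], chunks=(1, 250))
print(instantaneous_frequency_sketch(phase_deg).compute())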
Example #12
    def interpolate(self, lon1, lat1, satz1):
        cscan_len = self.cscan_len
        cscan_full_width = self.cscan_full_width

        fscan_width = self.fscan_width
        fscan_len = self.fscan_len

        scans = satz1.shape[0] // cscan_len
        satz1 = satz1.data

        satz1 = satz1.reshape((-1, cscan_len, cscan_full_width))

        satz_a, satz_b, satz_c, satz_d = get_corners(da.deg2rad(satz1))

        c_exp, c_ali = compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d)

        x, y = self.get_coords(scans)
        i_rs, i_rt = da.meshgrid(x, y)

        p_os = 0
        p_ot = 0

        s_s = (p_os + i_rs) * 1. / fscan_width
        s_t = (p_ot + i_rt) * 1. / fscan_len

        cols = fscan_width
        lines = fscan_len

        c_exp_full = self.expand_tiepoint_array(c_exp, lines, cols)
        c_ali_full = self.expand_tiepoint_array(c_ali, lines, cols)

        a_track = s_t
        a_scan = (s_s + s_s * (1 - s_s) * c_exp_full + s_t * (1 - s_t) * c_ali_full)

        res = []
        datasets = lonlat2xyz(lon1, lat1)
        for data in datasets:
            data = data.data
            data = data.reshape((-1, cscan_len, cscan_full_width))
            data_a, data_b, data_c, data_d = get_corners(data)
            data_a = self.expand_tiepoint_array(data_a, lines, cols)
            data_b = self.expand_tiepoint_array(data_b, lines, cols)
            data_c = self.expand_tiepoint_array(data_c, lines, cols)
            data_d = self.expand_tiepoint_array(data_d, lines, cols)

            data_1 = (1 - a_scan) * data_a + a_scan * data_b
            data_2 = (1 - a_scan) * data_d + a_scan * data_c
            data = (1 - a_track) * data_1 + a_track * data_2

            res.append(data)
        lon, lat = xyz2lonlat(*res)
        return xr.DataArray(lon, dims=lon1.dims), xr.DataArray(lat, dims=lat1.dims)
Example #13
def SkyToUnitSphere(ra, dec, degrees=True):
    """
    Convert sky coordinates (``ra``, ``dec``) to Cartesian coordinates on
    the unit sphere.

    Parameters
    ----------
    ra : :class:`dask.array.Array`; shape: (N,)
        the right ascension angular coordinate
    dec : :class:`dask.array.Array`; shape: (N,)
        the declination angular coordinate
    degrees : bool, optional
        specifies whether ``ra`` and ``dec`` are in degrees or radians

    Returns
    -------
    pos : :class:`dask.array.Array`; shape: (N,3)
        the cartesian position coordinates, where columns represent
        ``x``, ``y``, and ``z``

    Raises
    ------
    TypeError
        If the input columns are not dask arrays
    """
    if not all(isinstance(col, da.Array) for col in [ra, dec]):
        raise TypeError("both ``ra`` and ``dec`` must be dask arrays")

    # put into radians from degrees
    if degrees:
        ra  = da.deg2rad(ra)
        dec = da.deg2rad(dec)

    # cartesian coordinates
    x = da.cos( dec ) * da.cos( ra )
    y = da.cos( dec ) * da.sin( ra )
    z = da.sin( dec )
    return da.vstack([x,y,z]).T
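A short usage sketch for SkyToUnitSphere above, with made-up RA/Dec values; both inputs have to be dask arrays already, otherwise the TypeError above is raised.

import numpy as np
import dask.array as da

ra = da.from_array(np.array([0.0, 90.0, 180.0]), chunks=3)    # right ascension (degrees)
dec = da.from_array(np.array([0.0, 45.0, -30.0]), chunks=3)   # declination (degrees)

pos = SkyToUnitSphere(ra, dec)   # lazy dask array of shape (3, 3)
print(pos.compute())             # each row is a unit (x, y, z) vector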
Example #14
def test_arithmetic():
    x = np.arange(5).astype('f4') + 2
    y = np.arange(5).astype('i8') + 2
    z = np.arange(5).astype('i4') + 2
    a = da.from_array(x, chunks=(2,))
    b = da.from_array(y, chunks=(2,))
    c = da.from_array(z, chunks=(2,))
    assert eq(a + b, x + y)
    assert eq(a * b, x * y)
    assert eq(a - b, x - y)
    assert eq(a / b, x / y)
    assert eq(b & b, y & y)
    assert eq(b | b, y | y)
    assert eq(b ^ b, y ^ y)
    assert eq(a // b, x // y)
    assert eq(a ** b, x ** y)
    assert eq(a % b, x % y)
    assert eq(a > b, x > y)
    assert eq(a < b, x < y)
    assert eq(a >= b, x >= y)
    assert eq(a <= b, x <= y)
    assert eq(a == b, x == y)
    assert eq(a != b, x != y)

    assert eq(a + 2, x + 2)
    assert eq(a * 2, x * 2)
    assert eq(a - 2, x - 2)
    assert eq(a / 2, x / 2)
    assert eq(b & True, y & True)
    assert eq(b | True, y | True)
    assert eq(b ^ True, y ^ True)
    assert eq(a // 2, x // 2)
    assert eq(a ** 2, x ** 2)
    assert eq(a % 2, x % 2)
    assert eq(a > 2, x > 2)
    assert eq(a < 2, x < 2)
    assert eq(a >= 2, x >= 2)
    assert eq(a <= 2, x <= 2)
    assert eq(a == 2, x == 2)
    assert eq(a != 2, x != 2)

    assert eq(2 + b, 2 + y)
    assert eq(2 * b, 2 * y)
    assert eq(2 - b, 2 - y)
    assert eq(2 / b, 2 / y)
    assert eq(True & b, True & y)
    assert eq(True | b, True | y)
    assert eq(True ^ b, True ^ y)
    assert eq(2 // b, 2 // y)
    assert eq(2 ** b, 2 ** y)
    assert eq(2 % b, 2 % y)
    assert eq(2 > b, 2 > y)
    assert eq(2 < b, 2 < y)
    assert eq(2 >= b, 2 >= y)
    assert eq(2 <= b, 2 <= y)
    assert eq(2 == b, 2 == y)
    assert eq(2 != b, 2 != y)

    assert eq(-a, -x)
    assert eq(abs(a), abs(x))
    assert eq(~(a == b), ~(x == y))
    assert eq(~(a == b), ~(x == y))

    assert eq(da.logaddexp(a, b), np.logaddexp(x, y))
    assert eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
    assert eq(da.exp(b), np.exp(y))
    assert eq(da.log(a), np.log(x))
    assert eq(da.log10(a), np.log10(x))
    assert eq(da.log1p(a), np.log1p(x))
    assert eq(da.expm1(b), np.expm1(y))
    assert eq(da.sqrt(a), np.sqrt(x))
    assert eq(da.square(a), np.square(x))

    assert eq(da.sin(a), np.sin(x))
    assert eq(da.cos(b), np.cos(y))
    assert eq(da.tan(a), np.tan(x))
    assert eq(da.arcsin(b/10), np.arcsin(y/10))
    assert eq(da.arccos(b/10), np.arccos(y/10))
    assert eq(da.arctan(b/10), np.arctan(y/10))
    assert eq(da.arctan2(b*10, a), np.arctan2(y*10, x))
    assert eq(da.hypot(b, a), np.hypot(y, x))
    assert eq(da.sinh(a), np.sinh(x))
    assert eq(da.cosh(b), np.cosh(y))
    assert eq(da.tanh(a), np.tanh(x))
    assert eq(da.arcsinh(b*10), np.arcsinh(y*10))
    assert eq(da.arccosh(b*10), np.arccosh(y*10))
    assert eq(da.arctanh(b/10), np.arctanh(y/10))
    assert eq(da.deg2rad(a), np.deg2rad(x))
    assert eq(da.rad2deg(a), np.rad2deg(x))

    assert eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
    assert eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
    assert eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
    assert eq(da.logical_not(a < 1), np.logical_not(x < 1))
    assert eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))
    assert eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))
    assert eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))
    assert eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))

    assert eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
    assert eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
    assert eq(da.isfinite(a), np.isfinite(x))
    assert eq(da.isinf(a), np.isinf(x))
    assert eq(da.isnan(a), np.isnan(x))
    assert eq(da.signbit(a - 3), np.signbit(x - 3))
    assert eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
    assert eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
    assert eq(da.ldexp(c, c), np.ldexp(z, z))
    assert eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
    assert eq(da.floor(a * 0.5), np.floor(x * 0.5))
    assert eq(da.ceil(a), np.ceil(x))
    assert eq(da.trunc(a / 2), np.trunc(x / 2))

    assert eq(da.degrees(b), np.degrees(y))
    assert eq(da.radians(a), np.radians(x))

    assert eq(da.rint(a + 0.3), np.rint(x + 0.3))
    assert eq(da.fix(a - 2.5), np.fix(x - 2.5))

    assert eq(da.angle(a + 1j), np.angle(x + 1j))
    assert eq(da.real(a + 1j), np.real(x + 1j))
    assert eq((a + 1j).real, np.real(x + 1j))
    assert eq(da.imag(a + 1j), np.imag(x + 1j))
    assert eq((a + 1j).imag, np.imag(x + 1j))
    assert eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
    assert eq((a + 1j * b).conj(), (x + 1j * y).conj())

    assert eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
    assert eq(da.fabs(b), np.fabs(y))
    assert eq(da.sign(b - 2), np.sign(y - 2))

    l1, l2 = da.frexp(a)
    r1, r2 = np.frexp(x)
    assert eq(l1, r1)
    assert eq(l2, r2)

    l1, l2 = da.modf(a)
    r1, r2 = np.modf(x)
    assert eq(l1, r1)
    assert eq(l2, r2)

    assert eq(da.around(a, -1), np.around(x, -1))
Example #15
    def interpolate(self, lon1, lat1, satz1):
        cscan_len = self.cscan_len
        cscan_full_width = self.cscan_full_width

        fscan_width = self.fscan_width
        fscan_len = self.fscan_len

        scans = satz1.shape[0] // cscan_len
        satz1 = satz1.data

        satz1 = satz1.reshape((-1, cscan_len, cscan_full_width))

        satz_a, satz_b, satz_c, satz_d = get_corners(da.deg2rad(satz1))

        c_exp, c_ali = compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d)

        x, y = self.get_coords(scans)
        i_rs, i_rt = da.meshgrid(x, y)

        p_os = 0
        p_ot = 0

        s_s = (p_os + i_rs) * 1. / fscan_width
        s_t = (p_ot + i_rt) * 1. / fscan_len

        cols = fscan_width
        lines = fscan_len

        c_exp_full = self.expand_tiepoint_array(c_exp, lines, cols)
        c_ali_full = self.expand_tiepoint_array(c_ali, lines, cols)

        a_track = s_t
        a_scan = (s_s + s_s * (1 - s_s) * c_exp_full + s_t * (1 - s_t) * c_ali_full)

        res = []

        sublat = lat1[::16, ::16]
        sublon = lon1[::16, ::16]
        to_cart = abs(sublat).max() > 60 or (sublon.max() - sublon.min()) > 180

        if to_cart:
            datasets = lonlat2xyz(lon1, lat1)
        else:
            datasets = [lon1, lat1]

        for data in datasets:
            data_attrs = data.attrs
            dims = data.dims
            data = data.data
            data = data.reshape((-1, cscan_len, cscan_full_width))
            data_a, data_b, data_c, data_d = get_corners(data)
            data_a = self.expand_tiepoint_array(data_a, lines, cols)
            data_b = self.expand_tiepoint_array(data_b, lines, cols)
            data_c = self.expand_tiepoint_array(data_c, lines, cols)
            data_d = self.expand_tiepoint_array(data_d, lines, cols)

            data_1 = (1 - a_scan) * data_a + a_scan * data_b
            data_2 = (1 - a_scan) * data_d + a_scan * data_c
            data = (1 - a_track) * data_1 + a_track * data_2

            res.append(xr.DataArray(data, attrs=data_attrs, dims=dims))

        if to_cart:
            return xyz2lonlat(*res)
        else:
            return res
Example #16
def create_catalog(plot_cubemultipoles=False, num=seed):
    ## Generate lognormal cube with observer at the centre.  Add real, global and local pp redshift space and a flag for in / out of BGS.

    cat = LogNormalCatalog(Plin=Plin,
                           nbar=nbar,
                           BoxSize=boxsize,
                           Nmesh=fftsize,
                           bias=b1,
                           seed=num)
    ## rand  = LogNormalCatalog(Plin=null_power, nbar=nbar, BoxSize=boxsize, Nmesh=fftsize, bias=1.0, seed = 142, cosmo=cosmo, redshift=redshift)

    print("Created lognormal cube.")

    ## Possibility of a uniform cat.
    ## uniform          = UniformCatalog(nbar=150, BoxSize=1.0, seed=42)

    ## Place (0,0,0) at the centre.  Note: Dask array calls are placed on a list of commands, all evaluated on .compute()
    cat['Position'] -= boxsize / 2.

    ## Place in redshift-space according to the global plane-parallel approx.
    RSD_prefactor = (1. + redshift) / (100. * cosmo.efunc(redshift))

    cat['norm'] = da.sqrt(da.sum(cat['Position']**2., axis=1))

    GLOS = [0, 0, 1]  ## Assumes a Kaiser pp approx. along the z-axis

    ## Unit vector in the direction of the galaxy by broadcasting.
    cat['LLOS'] = cat['Position'] / cat['norm'][:, None]

    cat['GLOS_pos'] = cat['Position'] + RSD_prefactor * cat['Velocity'] * GLOS
    cat['LLOS_pos'] = cat[
        'Position'] + RSD_prefactor * cat['Velocity'] * cat['LLOS']

    ## RA and DEC will be returned in degrees, with RA in the range [0,360] and DEC in the range [-90, 90]
    ## Could just add dz explicitly.
    (cat['ra'], cat['dec'],
     cat['RZEE']) = transform.CartesianToSky(cat['Position'],
                                             cosmo,
                                             velocity=None,
                                             observer=[0, 0, 0],
                                             zmax=100.0)

    ## Now apply a zee cut according to local line-of-sight.
    ZMIN = 0.05
    ZMAX = 0.20

    valid = (cat["RZEE"] > ZMIN) & (cat["RZEE"] < ZMAX)
    cat = cat[valid]

    print("Applied %.3lf < (real-space) z < %.3lf cut to catalogue." %
          (ZMIN, ZMAX))

    (cat['ra'], cat['dec'],
     cat['GZEE']) = transform.CartesianToSky(cat['GLOS_pos'],
                                             cosmo,
                                             velocity=None,
                                             observer=[0, 0, 0],
                                             zmax=100.0)
    (cat['ra'], cat['dec'],
     cat['LZEE']) = transform.CartesianToSky(cat['LLOS_pos'],
                                             cosmo,
                                             velocity=None,
                                             observer=[0, 0, 0],
                                             zmax=100.0)

    print("Created redshifts for real, global and local pp redshift space.")

    if plot_cubemultipoles is True:
        pl.clf()

        ## Convert the catalog to the mesh, with CIC interpolation
        mesh = cat.to_mesh(compensated=True,
                           window='cic',
                           position='Position',
                           interlaced=interlaced)
        r = FFTPower(mesh,
                     mode='2d',
                     dk=0.005,
                     kmin=0.01,
                     Nmu=20,
                     los=GLOS,
                     poles=[0, 2, 4])
        poles = r.poles

        for ell in [0, 2]:
            label = r'$\ell=%d$' % ell
            P = poles['power_%d' % ell].real

            if ell == 0:
                P = P - poles.attrs['shotnoise']

            pl.loglog(poles['k'], P, label=label)

        ## Plot real-space power.
        k = np.logspace(-2, 0, 512)
        plt.loglog(k, b1**2 * Plin(k), c='k', label=r'$b_1^2 P_\mathrm{lin}$')

        plt.xlabel(r"$k$ [$h \ \mathrm{Mpc}^{-1}$]")
        plt.ylabel(r"$P_\ell(k)$ [$(h^{-1} \ \mathrm{Mpc})^3$]")

        plt.xlim(0.01, 0.30)
        plt.ylim(1e3, 1e5)

        pl.savefig('lognormalcube_multipoles.pdf')

        print("Plotted lognormal cube multipoles.")

    ## DESI BGS Healpix nside defined footprint; hardcoded nside.
    nside = 128
    imap = np.loadtxt("desibgs_imap_nside_%d.txt" % nside)

    cat['theta'] = 0.5 * np.pi - da.deg2rad(cat['dec'])
    cat['phi'] = da.deg2rad(cat['ra'])

    ipix = hp.pixelfunc.ang2pix(nside,
                                cat['theta'].compute(),
                                cat['phi'].compute(),
                                nest=False,
                                lonlat=False)
    cat['in_bgs'] = imap[ipix]

    ngal = cat['in_bgs'].shape[0]
    bgs_ngal = da.sum(cat['in_bgs']).compute()

    ## Accepted by BGS mask.
    print(
        "Number of galaxies created: %d, Number in BGS: %d, Percentage accepted: %.3lf"
        % (ngal, bgs_ngal, 100. * bgs_ngal / ngal))

    ## Cut by in BGS.
    valid = cat['in_bgs'] > 0.0
    cat = cat[valid]

    print("Applied BGS footprint cut to catalogue.")

    print("Writing lognormal BGS mock to fits.")

    ## Create .fits
    ra = fits.Column(name='RA', format='D', array=cat['ra'].compute())
    dec = fits.Column(name='DEC', format='D', array=cat['dec'].compute())

    rzee = fits.Column(name='RZEE', format='D', array=cat['RZEE'].compute())
    gzee = fits.Column(name='GZEE', format='D', array=cat['GZEE'].compute())
    lzee = fits.Column(name='LZEE', format='D', array=cat['LZEE'].compute())

    cols = fits.ColDefs([ra, dec, rzee, gzee, lzee])
    hdr = fits.Header()

    hdr['Creator'] = 'M. J. Wilson'
    hdr['COMMENT'] = "Lognormal BGS mocks in real, global and local pp redshift space."

    hdu = fits.BinTableHDU.from_columns(cols, header=hdr)

    print(num)

    hdu.writeto(output_dirs[mock_output] +
                '/desi/logmocks/lognormal_bgs_seed-%03d.fits' % num,
                overwrite=True)
Example #17
def run_crefl(refl,
              coeffs,
              lon,
              lat,
              sensor_azimuth,
              sensor_zenith,
              solar_azimuth,
              solar_zenith,
              avg_elevation=None,
              percent=False,
              use_abi=False):
    """Run main crefl algorithm.

    All input parameters are per-pixel values meaning they are the same size
    and shape as the input reflectance data, unless otherwise stated.

    :param refl: input reflectance band array
    :param coeffs: tuple of coefficients for the band (see `get_coefficients`)
    :param lon: input swath longitude array
    :param lat: input swath latitude array
    :param sensor_azimuth: input swath sensor azimuth angle array
    :param sensor_zenith: input swath sensor zenith angle array
    :param solar_azimuth: input swath solar azimuth angle array
    :param solar_zenith: input swath solar zenith angle array
    :param avg_elevation: average elevation (usually pre-calculated and stored in CMGDEM.hdf)
    :param percent: True if input reflectances are on a 0-100 scale instead of 0-1 scale (default: False)
    :param use_abi: True to use the ABI version of the CREFL algorithm (default: False)

    """
    # FUTURE: Find a way to compute the average elevation before hand
    # Get digital elevation map data for our granule, set ocean fill value to 0
    if avg_elevation is None:
        LOG.debug("No average elevation information provided in CREFL")
        #height = np.zeros(lon.shape, dtype=np.float)
        height = 0.
    else:
        LOG.debug("Using average elevation information provided to CREFL")
        lat[(lat <= -90) | (lat >= 90)] = np.nan
        lon[(lon <= -180) | (lon >= 180)] = np.nan
        row = ((90.0 - lat) * avg_elevation.shape[0] / 180.0).astype(np.int32)
        col = ((lon + 180.0) * avg_elevation.shape[1] / 360.0).astype(np.int32)
        space_mask = da.isnull(lon) | da.isnull(lat)
        row[space_mask] = 0
        col[space_mask] = 0

        def _avg_elevation_index(avg_elevation, row, col):
            return avg_elevation[row, col]

        height = da.map_blocks(_avg_elevation_index,
                               avg_elevation,
                               row,
                               col,
                               dtype=avg_elevation.dtype)
        height = xr.DataArray(height, dims=['y', 'x'])
        # negative heights aren't allowed, clip to 0
        height = height.where((height >= 0.) & ~space_mask, 0.0)
        del lat, lon, row, col
    mus = da.cos(da.deg2rad(solar_zenith))
    mus = mus.where(mus >= 0)
    muv = da.cos(da.deg2rad(sensor_zenith))
    phi = solar_azimuth - sensor_azimuth

    if use_abi:
        LOG.debug("Using ABI CREFL algorithm")
        a_O3 = [268.45, 0.5, 115.42, -3.2922]
        a_H2O = [0.0311, 0.1, 92.471, -1.3814]
        a_O2 = [0.4567, 0.007, 96.4884, -1.6970]
        G_O3 = G_calc(solar_zenith, a_O3) + G_calc(sensor_zenith, a_O3)
        G_H2O = G_calc(solar_zenith, a_H2O) + G_calc(sensor_zenith, a_H2O)
        G_O2 = G_calc(solar_zenith, a_O2) + G_calc(sensor_zenith, a_O2)
        # Note: bh2o values are actually ao2 values for abi
        sphalb, rhoray, TtotraytH2O, tOG = get_atm_variables_abi(
            mus, muv, phi, height, G_O3, G_H2O, G_O2, *coeffs)
    else:
        LOG.debug("Using original VIIRS CREFL algorithm")
        sphalb, rhoray, TtotraytH2O, tOG = get_atm_variables(
            mus, muv, phi, height, *coeffs)

    del solar_azimuth, solar_zenith, sensor_zenith, sensor_azimuth
    # Note: Assume that fill/invalid values are either NaN or we are dealing
    # with masked arrays
    if percent:
        corr_refl = ((refl / 100.) / tOG - rhoray) / TtotraytH2O
    else:
        corr_refl = (refl / tOG - rhoray) / TtotraytH2O
    corr_refl /= (1.0 + corr_refl * sphalb)
    return corr_refl.clip(REFLMIN, REFLMAX)
Example #18
def G_calc(zenith, a_coeff):
    return (da.cos(da.deg2rad(zenith)) +
            (a_coeff[0] * (zenith**a_coeff[1]) *
             (a_coeff[2] - zenith)**a_coeff[3]))**-1
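A small usage sketch for G_calc above, reusing the a_O3 coefficient set that run_crefl in Example #17 passes to it; the zenith angles are illustrative.

import numpy as np
import dask.array as da

a_O3 = [268.45, 0.5, 115.42, -3.2922]   # same ozone coefficients as in run_crefl above
solar_zenith = da.from_array(np.array([10.0, 30.0, 60.0]), chunks=3)   # degrees

G_O3 = G_calc(solar_zenith, a_O3)
print(G_O3.compute())   # grows with zenith angle, roughly like an air-mass factor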
Example #19
def chand(phi, muv, mus, taur):
    # FROM FUNCTION CHAND
    # phi: azimuthal difference between sun and observation in degree
    #      (phi=0 in backscattering direction)
    # mus: cosine of the sun zenith angle
    # muv: cosine of the observation zenith angle
    # taur: molecular optical depth
    # rhoray: molecular path reflectance
    # constant xdep: depolarization factor (0.0279)
    #          xfd = (1-xdep/(2-xdep)) / (1 + 2*xdep/(2-xdep)) = 2 * (1 - xdep) / (2 + xdep) = 0.958725775
    # */
    xfd = 0.958725775
    xbeta2 = 0.5
    #         float pl[5];
    #         double fs01, fs02, fs0, fs1, fs2;
    as0 = [
        0.33243832, 0.16285370, -0.30924818, -0.10324388, 0.11493334,
        -6.777104e-02, 1.577425e-03, -1.240906e-02, 3.241678e-02, -3.503695e-02
    ]
    as1 = [0.19666292, -5.439061e-02]
    as2 = [0.14545937, -2.910845e-02]
    #         float phios, xcos1, xcos2, xcos3;
    #         float xph1, xph2, xph3, xitm1, xitm2;
    #         float xlntaur, xitot1, xitot2, xitot3;
    #         int i, ib;

    xph1 = 1.0 + (3.0 * mus * mus - 1.0) * (3.0 * muv * muv - 1.0) * xfd / 8.0
    xph2 = -xfd * xbeta2 * 1.5 * mus * muv * da.sqrt(
        1.0 - mus * mus) * da.sqrt(1.0 - muv * muv)
    xph3 = xfd * xbeta2 * 0.375 * (1.0 - mus * mus) * (1.0 - muv * muv)

    # pl[0] = 1.0
    # pl[1] = mus + muv
    # pl[2] = mus * muv
    # pl[3] = mus * mus + muv * muv
    # pl[4] = mus * mus * muv * muv

    fs01 = as0[0] + (mus + muv) * as0[1] + (mus * muv) * as0[2] + (
        mus * mus + muv * muv) * as0[3] + (mus * mus * muv * muv) * as0[4]
    fs02 = as0[5] + (mus + muv) * as0[6] + (mus * muv) * as0[7] + (
        mus * mus + muv * muv) * as0[8] + (mus * mus * muv * muv) * as0[9]
    #         for (i = 0; i < 5; i++) {
    #                 fs01 += (double) (pl[i] * as0[i]);
    #                 fs02 += (double) (pl[i] * as0[5 + i]);
    #         }

    # for refl, (ah2o, bh2o, ao3, tau) in zip(reflectance_bands, coefficients):

    # ib = find_coefficient_index(center_wl)
    # if ib is None:
    #     raise ValueError("Can't handle band with wavelength '{}'".format(center_wl))

    xlntaur = da.log(taur)

    fs0 = fs01 + fs02 * xlntaur
    fs1 = as1[0] + xlntaur * as1[1]
    fs2 = as2[0] + xlntaur * as2[1]
    del xlntaur, fs01, fs02

    trdown = da.exp(-taur / mus)
    trup = da.exp(-taur / muv)

    xitm1 = (1.0 - trdown * trup) / 4.0 / (mus + muv)
    xitm2 = (1.0 - trdown) * (1.0 - trup)
    xitot1 = xph1 * (xitm1 + xitm2 * fs0)
    xitot2 = xph2 * (xitm1 + xitm2 * fs1)
    xitot3 = xph3 * (xitm1 + xitm2 * fs2)
    del xph1, xph2, xph3, xitm1, xitm2, fs0, fs1, fs2

    phios = da.deg2rad(phi + 180.0)
    xcos1 = 1.0
    xcos2 = da.cos(phios)
    xcos3 = da.cos(2.0 * phios)
    del phios

    rhoray = xitot1 * xcos1 + xitot2 * xcos2 * 2.0 + xitot3 * xcos3 * 2.0
    return rhoray, trdown, trup
Example #20
    def get_reflectance(self, sun_zenith, sat_zenith, azidiff, bandname, redband=None):
        """Get the reflectance from the three sun-sat angles"""
        # Get wavelength in nm for band:
        if isinstance(bandname, float):
            LOG.warning('A wavelength is provided instead of band name - ' +
                        'disregard the relative spectral responses and assume ' +
                        'it is the effective wavelength: %f (micro meter)', bandname)
            wvl = bandname * 1000.0
        else:
            wvl = self.get_effective_wavelength(bandname)
            wvl = wvl * 1000.0

        rayl, wvl_coord, azid_coord, satz_sec_coord, sunz_sec_coord = self.get_reflectance_lut()

        # force dask arrays
        compute = False
        if HAVE_DASK and not isinstance(sun_zenith, Array):
            compute = True
            sun_zenith = from_array(sun_zenith, chunks=sun_zenith.shape)
            sat_zenith = from_array(sat_zenith, chunks=sat_zenith.shape)
            azidiff = from_array(azidiff, chunks=azidiff.shape)
            if redband is not None:
                redband = from_array(redband, chunks=redband.shape)

        clip_angle = rad2deg(arccos(1. / sunz_sec_coord.max()))
        sun_zenith = clip(sun_zenith, 0, clip_angle)
        sunzsec = 1. / cos(deg2rad(sun_zenith))
        clip_angle = rad2deg(arccos(1. / satz_sec_coord.max()))
        sat_zenith = clip(sat_zenith, 0, clip_angle)
        satzsec = 1. / cos(deg2rad(sat_zenith))
        shape = sun_zenith.shape

        if not(wvl_coord.min() < wvl < wvl_coord.max()):
            LOG.warning(
                "Effective wavelength for band %s outside 400-800 nm range!",
                str(bandname))
            LOG.info(
                "Set the rayleigh/aerosol reflectance contribution to zero!")
            if HAVE_DASK:
                chunks = sun_zenith.chunks if redband is None else redband.chunks
                res = zeros(shape, chunks=chunks)
                return res.compute() if compute else res
            else:
                return zeros(shape)

        idx = np.searchsorted(wvl_coord, wvl)
        wvl1 = wvl_coord[idx - 1]
        wvl2 = wvl_coord[idx]

        fac = (wvl2 - wvl) / (wvl2 - wvl1)
        raylwvl = fac * rayl[idx - 1, :, :, :] + (1 - fac) * rayl[idx, :, :, :]
        tic = time.time()

        smin = [sunz_sec_coord[0], azid_coord[0], satz_sec_coord[0]]
        smax = [sunz_sec_coord[-1], azid_coord[-1], satz_sec_coord[-1]]
        orders = [
            len(sunz_sec_coord), len(azid_coord), len(satz_sec_coord)]
        f_3d_grid = atleast_2d(raylwvl.ravel())

        if HAVE_DASK and isinstance(smin[0], Array):
            # compute all of these at the same time before passing to the interpolator
            # otherwise they are computed separately
            smin, smax, orders, f_3d_grid = da.compute(smin, smax, orders, f_3d_grid)
        minterp = MultilinearInterpolator(smin, smax, orders)
        minterp.set_values(f_3d_grid)

        if HAVE_DASK:
            ipn = map_blocks(self._do_interp, minterp, sunzsec, azidiff,
                             satzsec, dtype=raylwvl.dtype, chunks=azidiff.chunks)
        else:
            ipn = self._do_interp(minterp, sunzsec, azidiff, satzsec)

        LOG.debug("Time - Interpolation: {0:f}".format(time.time() - tic))

        ipn *= 100
        res = ipn
        if redband is not None:
            res = where(redband < 20., res,
                        (1 - (redband - 20) / 80) * res)

        res = clip(res, 0, 100)
        if compute:
            res = res.compute()
        return res
Example #21
def SkyToUnitSphere(ra, dec, degrees=True, frame='icrs'):
    """
    Convert sky coordinates (``ra``, ``dec``) to Cartesian coordinates on
    the unit sphere.

    Parameters
    ----------
    ra : :class:`dask.array.Array`; shape: (N,)
        the right ascension angular coordinate
    dec : :class:`dask.array.Array`; shape: (N,)
        the declination angular coordinate
    degrees : bool, optional
        specifies whether ``ra`` and ``dec`` are in degrees or radians
    frame : string ('icrs' or 'galactic')
        specifies which frame the Cartesian coordinates are in. Useful if you
        know the simulation (usually Cartesian) is in galactic coordinates but
        you want to convert to the ICRS (ra, dec) frame usually used in surveys.

    Returns
    -------
    pos : :class:`dask.array.Array`; shape: (N,3)
        the cartesian position coordinates, where columns represent
        ``x``, ``y``, and ``z``

    Raises
    ------
    TypeError
        If the input columns are not dask arrays
    """
    ra, dec = da.broadcast_arrays(ra, dec)

    if frame == 'icrs':
        # no frame transformation
        # put into radians from degrees
        if degrees:
            ra  = da.deg2rad(ra)
            dec = da.deg2rad(dec)

        # cartesian coordinates
        x = da.cos( dec ) * da.cos( ra )
        y = da.cos( dec ) * da.sin( ra )
        z = da.sin( dec )
        return da.vstack([x,y,z]).T
    else:
        from astropy.coordinates import SkyCoord

        if degrees:
            ra  = da.deg2rad(ra)
            dec = da.deg2rad(dec)

        def eq_to_cart(ra, dec):
            try:
                sc = SkyCoord(ra, dec, unit='rad', representation_type='unitspherical', frame='icrs')
            except:
                sc = SkyCoord(ra, dec, unit='rad', representation='unitspherical', frame='icrs')

            scg = sc.transform_to(frame=frame)
            scg = scg.cartesian

            x, y, z = scg.x.value, scg.y.value, scg.z.value
            return numpy.stack([x, y, z], axis=1)

        arr = da.apply_gufunc(eq_to_cart, '(),()->(p)', ra, dec, output_dtypes=[ra.dtype], output_sizes={'p': 3})
        return arr
Example #22
    def interpolate(self, lon1, lat1, satz1):
        cscan_len = self.cscan_len
        cscan_full_width = self.cscan_full_width

        fscan_width = self.fscan_width
        fscan_len = self.fscan_len

        scans = satz1.shape[0] // cscan_len
        satz1 = satz1.data

        satz1 = satz1.reshape((-1, cscan_len, cscan_full_width))

        satz_a, satz_b, satz_c, satz_d = get_corners(da.deg2rad(satz1))

        c_exp, c_ali = compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d)

        x, y = self.get_coords(scans)
        i_rs, i_rt = da.meshgrid(x, y)

        p_os = 0
        p_ot = 0

        s_s = (p_os + i_rs) * 1. / fscan_width
        s_t = (p_ot + i_rt) * 1. / fscan_len

        cols = fscan_width
        lines = fscan_len

        c_exp_full = self.expand_tiepoint_array(c_exp, lines, cols)
        c_ali_full = self.expand_tiepoint_array(c_ali, lines, cols)

        a_track = s_t
        a_scan = (s_s + s_s * (1 - s_s) * c_exp_full + s_t*(1 - s_t) * c_ali_full)

        res = []

        sublat = lat1[::16, ::16]
        sublon = lon1[::16, ::16]
        to_cart = abs(sublat).max() > 60 or (sublon.max() - sublon.min()) > 180

        if to_cart:
            datasets = lonlat2xyz(lon1, lat1)
        else:
            datasets = [lon1, lat1]

        for data in datasets:
            data_attrs = data.attrs
            dims = data.dims
            data = data.data
            data = data.reshape((-1, cscan_len, cscan_full_width))
            data_a, data_b, data_c, data_d = get_corners(data)
            data_a = self.expand_tiepoint_array(data_a, lines, cols)
            data_b = self.expand_tiepoint_array(data_b, lines, cols)
            data_c = self.expand_tiepoint_array(data_c, lines, cols)
            data_d = self.expand_tiepoint_array(data_d, lines, cols)

            data_1 = (1 - a_scan) * data_a + a_scan * data_b
            data_2 = (1 - a_scan) * data_d + a_scan * data_c
            data = (1 - a_track) * data_1 + a_track * data_2

            res.append(xr.DataArray(data, attrs=data_attrs, dims=dims))

        if to_cart:
            return xyz2lonlat(*res)
        else:
            return res
Example #23
def test_arithmetic():
    x = np.arange(5).astype('f4') + 2
    y = np.arange(5).astype('i8') + 2
    z = np.arange(5).astype('i4') + 2
    a = da.from_array(x, chunks=(2, ))
    b = da.from_array(y, chunks=(2, ))
    c = da.from_array(z, chunks=(2, ))
    assert eq(a + b, x + y)
    assert eq(a * b, x * y)
    assert eq(a - b, x - y)
    assert eq(a / b, x / y)
    assert eq(b & b, y & y)
    assert eq(b | b, y | y)
    assert eq(b ^ b, y ^ y)
    assert eq(a // b, x // y)
    assert eq(a**b, x**y)
    assert eq(a % b, x % y)
    assert eq(a > b, x > y)
    assert eq(a < b, x < y)
    assert eq(a >= b, x >= y)
    assert eq(a <= b, x <= y)
    assert eq(a == b, x == y)
    assert eq(a != b, x != y)

    assert eq(a + 2, x + 2)
    assert eq(a * 2, x * 2)
    assert eq(a - 2, x - 2)
    assert eq(a / 2, x / 2)
    assert eq(b & True, y & True)
    assert eq(b | True, y | True)
    assert eq(b ^ True, y ^ True)
    assert eq(a // 2, x // 2)
    assert eq(a**2, x**2)
    assert eq(a % 2, x % 2)
    assert eq(a > 2, x > 2)
    assert eq(a < 2, x < 2)
    assert eq(a >= 2, x >= 2)
    assert eq(a <= 2, x <= 2)
    assert eq(a == 2, x == 2)
    assert eq(a != 2, x != 2)

    assert eq(2 + b, 2 + y)
    assert eq(2 * b, 2 * y)
    assert eq(2 - b, 2 - y)
    assert eq(2 / b, 2 / y)
    assert eq(True & b, True & y)
    assert eq(True | b, True | y)
    assert eq(True ^ b, True ^ y)
    assert eq(2 // b, 2 // y)
    assert eq(2**b, 2**y)
    assert eq(2 % b, 2 % y)
    assert eq(2 > b, 2 > y)
    assert eq(2 < b, 2 < y)
    assert eq(2 >= b, 2 >= y)
    assert eq(2 <= b, 2 <= y)
    assert eq(2 == b, 2 == y)
    assert eq(2 != b, 2 != y)

    assert eq(-a, -x)
    assert eq(abs(a), abs(x))
    assert eq(~(a == b), ~(x == y))
    assert eq(~(a == b), ~(x == y))

    assert eq(da.logaddexp(a, b), np.logaddexp(x, y))
    assert eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
    assert eq(da.exp(b), np.exp(y))
    assert eq(da.log(a), np.log(x))
    assert eq(da.log10(a), np.log10(x))
    assert eq(da.log1p(a), np.log1p(x))
    assert eq(da.expm1(b), np.expm1(y))
    assert eq(da.sqrt(a), np.sqrt(x))
    assert eq(da.square(a), np.square(x))

    assert eq(da.sin(a), np.sin(x))
    assert eq(da.cos(b), np.cos(y))
    assert eq(da.tan(a), np.tan(x))
    assert eq(da.arcsin(b / 10), np.arcsin(y / 10))
    assert eq(da.arccos(b / 10), np.arccos(y / 10))
    assert eq(da.arctan(b / 10), np.arctan(y / 10))
    assert eq(da.arctan2(b * 10, a), np.arctan2(y * 10, x))
    assert eq(da.hypot(b, a), np.hypot(y, x))
    assert eq(da.sinh(a), np.sinh(x))
    assert eq(da.cosh(b), np.cosh(y))
    assert eq(da.tanh(a), np.tanh(x))
    assert eq(da.arcsinh(b * 10), np.arcsinh(y * 10))
    assert eq(da.arccosh(b * 10), np.arccosh(y * 10))
    assert eq(da.arctanh(b / 10), np.arctanh(y / 10))
    assert eq(da.deg2rad(a), np.deg2rad(x))
    assert eq(da.rad2deg(a), np.rad2deg(x))

    assert eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
    assert eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
    assert eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
    assert eq(da.logical_not(a < 1), np.logical_not(x < 1))
    assert eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))
    assert eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))
    assert eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))
    assert eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))

    assert eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
    assert eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
    assert eq(da.isfinite(a), np.isfinite(x))
    assert eq(da.isinf(a), np.isinf(x))
    assert eq(da.isnan(a), np.isnan(x))
    assert eq(da.signbit(a - 3), np.signbit(x - 3))
    assert eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
    assert eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
    assert eq(da.ldexp(c, c), np.ldexp(z, z))
    assert eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
    assert eq(da.floor(a * 0.5), np.floor(x * 0.5))
    assert eq(da.ceil(a), np.ceil(x))
    assert eq(da.trunc(a / 2), np.trunc(x / 2))

    assert eq(da.degrees(b), np.degrees(y))
    assert eq(da.radians(a), np.radians(x))

    assert eq(da.rint(a + 0.3), np.rint(x + 0.3))
    assert eq(da.fix(a - 2.5), np.fix(x - 2.5))

    assert eq(da.angle(a + 1j), np.angle(x + 1j))
    assert eq(da.real(a + 1j), np.real(x + 1j))
    assert eq((a + 1j).real, np.real(x + 1j))
    assert eq(da.imag(a + 1j), np.imag(x + 1j))
    assert eq((a + 1j).imag, np.imag(x + 1j))
    assert eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
    assert eq((a + 1j * b).conj(), (x + 1j * y).conj())

    assert eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
    assert eq(da.fabs(b), np.fabs(y))
    assert eq(da.sign(b - 2), np.sign(y - 2))

    l1, l2 = da.frexp(a)
    r1, r2 = np.frexp(x)
    assert eq(l1, r1)
    assert eq(l2, r2)

    l1, l2 = da.modf(a)
    r1, r2 = np.modf(x)
    assert eq(l1, r1)
    assert eq(l2, r2)

    assert eq(da.around(a, -1), np.around(x, -1))
Example #24
    def interpolate(self, lon1, lat1, satz1):
        cscan_len = self.cscan_len
        cscan_full_width = self.cscan_full_width

        fscan_width = self.fscan_width
        fscan_len = self.fscan_len

        scans = lat1.shape[0] // cscan_len
        latattrs = lat1.attrs
        lonattrs = lon1.attrs
        dims = lat1.dims
        lat1 = lat1.data
        lon1 = lon1.data
        satz1 = satz1.data

        lat1 = lat1.reshape((-1, cscan_len, cscan_full_width))
        lon1 = lon1.reshape((-1, cscan_len, cscan_full_width))
        satz1 = satz1.reshape((-1, cscan_len, cscan_full_width))

        lats_a, lats_b, lats_c, lats_d = get_corners(lat1)
        lons_a, lons_b, lons_c, lons_d = get_corners(lon1)
        satz_a, satz_b, satz_c, satz_d = get_corners(da.deg2rad(satz1))
        c_exp, c_ali = compute_expansion_alignment(satz_a, satz_b, satz_c,
                                                   satz_d)

        x, y = self.get_coords(scans)
        i_rs, i_rt = da.meshgrid(x, y)

        p_os = 0
        p_ot = 0

        s_s = (p_os + i_rs) * 1. / fscan_width
        s_t = (p_ot + i_rt) * 1. / fscan_len

        cols = fscan_width
        lines = fscan_len

        c_exp_full = self.expand_tiepoint_array(c_exp, lines, cols)
        c_ali_full = self.expand_tiepoint_array(c_ali, lines, cols)

        a_track = s_t
        a_scan = (s_s + s_s * (1 - s_s) * c_exp_full + s_t *
                  (1 - s_t) * c_ali_full)

        lats_a = self.expand_tiepoint_array(lats_a, lines, cols)
        lats_b = self.expand_tiepoint_array(lats_b, lines, cols)
        lats_c = self.expand_tiepoint_array(lats_c, lines, cols)
        lats_d = self.expand_tiepoint_array(lats_d, lines, cols)
        lons_a = self.expand_tiepoint_array(lons_a, lines, cols)
        lons_b = self.expand_tiepoint_array(lons_b, lines, cols)
        lons_c = self.expand_tiepoint_array(lons_c, lines, cols)
        lons_d = self.expand_tiepoint_array(lons_d, lines, cols)

        lats_1 = (1 - a_scan) * lats_a + a_scan * lats_b
        lats_2 = (1 - a_scan) * lats_d + a_scan * lats_c
        lats = (1 - a_track) * lats_1 + a_track * lats_2

        lons_1 = (1 - a_scan) * lons_a + a_scan * lons_b
        lons_2 = (1 - a_scan) * lons_d + a_scan * lons_c
        lons = (1 - a_track) * lons_1 + a_track * lons_2

        return xr.DataArray(lons, attrs=lonattrs,
                            dims=dims), xr.DataArray(lats,
                                                     attrs=latattrs,
                                                     dims=dims)
Example #25
def SkyToUnitSphere(ra, dec, degrees=True, frame='icrs'):
    """
    Convert sky coordinates (``ra``, ``dec``) to Cartesian coordinates on
    the unit sphere.

    Parameters
    ----------
    ra : :class:`dask.array.Array`; shape: (N,)
        the right ascension angular coordinate
    dec : :class:`dask.array.Array`; shape: (N,)
        the declination angular coordinate
    degrees : bool, optional
        specifies whether ``ra`` and ``dec`` are in degrees or radians
    frame : string ('icrs' or 'galactic')
        specifies which frame the Cartesian coordinates are in. Useful if you
        know the simulation (usually Cartesian) is in galactic coordinates but
        you want to convert to the ICRS (ra, dec) frame usually used in surveys.

    Returns
    -------
    pos : :class:`dask.array.Array`; shape: (N,3)
        the cartesian position coordinates, where columns represent
        ``x``, ``y``, and ``z``

    Raises
    ------
    TypeError
        If the input columns are not dask arrays
    """
    ra, dec = da.broadcast_arrays(ra, dec)

    if frame == 'icrs':
        # no frame transformation
        # put into radians from degrees
        if degrees:
            ra  = da.deg2rad(ra)
            dec = da.deg2rad(dec)

        # cartesian coordinates
        x = da.cos( dec ) * da.cos( ra )
        y = da.cos( dec ) * da.sin( ra )
        z = da.sin( dec )
        return da.vstack([x,y,z]).T
    else:
        from astropy.coordinates import SkyCoord

        if degrees:
            ra  = da.deg2rad(ra)
            dec = da.deg2rad(dec)

        def eq_to_cart(ra, dec):
            try:
                sc = SkyCoord(ra, dec, unit='rad', representation_type='unitspherical', frame='icrs')
            except:
                sc = SkyCoord(ra, dec, unit='rad', representation='unitspherical', frame='icrs')

            scg = sc.transform_to(frame=frame)
            scg = scg.cartesian

            x, y, z = scg.x.value, scg.y.value, scg.z.value
            return numpy.stack([x, y, z], axis=1)

        arr = da.apply_gufunc(eq_to_cart, '(),()->(p)', ra, dec, output_dtypes=[ra.dtype], output_sizes={'p': 3})
        return arr
Example #26
    def norm_topo(self,
                  data,
                  elev,
                  solar_za,
                  solar_az,
                  slope=None,
                  aspect=None,
                  method='empirical-rotation',
                  slope_thresh=2,
                  nodata=0,
                  elev_nodata=-32768,
                  scale_factor=1,
                  angle_scale=0.01,
                  n_jobs=1,
                  robust=False,
                  min_samples=100,
                  slope_kwargs=None,
                  aspect_kwargs=None,
                  band_coeffs=None):

        """
        Applies topographic normalization

        Args:
            data (2d or 3d DataArray): The data to normalize, in the range 0-1.
            elev (2d DataArray): The elevation data.
            solar_za (2d DataArray): The solar zenith angles (degrees).
            solar_az (2d DataArray): The solar azimuth angles (degrees).
            slope (2d DataArray): The slope data. If not given, slope is calculated from ``elev``.
            aspect (2d DataArray): The aspect data. If not given, aspect is calculated from ``elev``.
            method (Optional[str]): The method to apply. Choices are ['c', 'empirical-rotation'].
            slope_thresh (Optional[float or int]): The slope threshold. Any samples with
                values < ``slope_thresh`` are not adjusted.
            nodata (Optional[int or float]): The 'no data' value for ``data``.
            elev_nodata (Optional[float or int]): The 'no data' value for ``elev``.
            scale_factor (Optional[float]): A scale factor to apply to the input data.
            angle_scale (Optional[float]): The angle scale factor.
            n_jobs (Optional[int]): The number of parallel workers for ``LinearRegression.fit``.
            robust (Optional[bool]): Whether to fit a robust regression.
            min_samples (Optional[int]): The minimum number of samples required to fit a regression.
            slope_kwargs (Optional[dict]): Keyword arguments passed to ``gdal.DEMProcessingOptions``
                to calculate the slope.
            aspect_kwargs (Optional[dict]): Keyword arguments passed to ``gdal.DEMProcessingOptions``
                to calculate the aspect.
            band_coeffs (Optional[dict]): Slope and intercept coefficients for each band.

        References:

            See :cite:`teillet_etal_1982` for the C-correction method.
            See :cite:`tan_etal_2010` for the Empirical Rotation method.

        Returns:
            ``xarray.DataArray``

        Examples:
            >>> import geowombat as gw
            >>> from geowombat.radiometry import Topo
            >>>
            >>> topo = Topo()
            >>>
            >>> # Example where pixel angles are stored in separate GeoTiff files
            >>> with gw.config.update(sensor='l7', scale_factor=0.0001, nodata=0):
            >>>
            >>>     with gw.open('landsat.tif') as src,
            >>>         gw.open('srtm') as elev,
            >>>             gw.open('solarz.tif') as solarz,
            >>>                 gw.open('solara.tif') as solara:
            >>>
            >>>         src_norm = topo.norm_topo(src, elev, solarz, solara, n_jobs=-1)
        """

        method = method.strip().lower()

        if method not in ['c', 'empirical-rotation']:

            logger.exception("  Currently, the only supported methods are 'c' and 'empirical-rotation'.")
            raise NameError

        attrs = data.attrs.copy()

        if not nodata:
            nodata = data.gw.nodata

        if scale_factor == 1.0:
            scale_factor = data.gw.scale_factor

        # Scale the reflectance data
        if scale_factor != 1:
            data = data * scale_factor

        if not slope_kwargs:

            slope_kwargs = dict(format='MEM',
                                computeEdges=True,
                                alg='ZevenbergenThorne',
                                slopeFormat='degree')

        if not aspect_kwargs:

            aspect_kwargs = dict(format='MEM',
                                 computeEdges=True,
                                 alg='ZevenbergenThorne',
                                 trigonometric=False,
                                 zeroForFlat=True)

        slope_kwargs['format'] = 'MEM'
        slope_kwargs['slopeFormat'] = 'degree'
        aspect_kwargs['format'] = 'MEM'

        # Force to SRTM resolution
        proc_dims = (int((data.gw.ncols*data.gw.cellx) / 30.0),
                     int((data.gw.nrows*data.gw.celly) / 30.0))

        w = int((5 * 30.0) / data.gw.celly)

        if w % 2 == 0:
            w += 1

        if isinstance(slope, xr.DataArray):
            slope_deg_fd = slope.squeeze().data
        else:

            slope_deg = calc_slope_delayed(elev.squeeze().data, proc_dims=proc_dims, w=w, **slope_kwargs)
            slope_deg_fd = da.from_delayed(slope_deg, (data.gw.nrows, data.gw.ncols), dtype='float64')

        if isinstance(aspect, xr.DataArray):
            aspect_deg_fd = aspect.squeeze().data
        else:

            aspect_deg = calc_aspect_delayed(elev.squeeze().data, proc_dims=proc_dims, w=w, **aspect_kwargs)
            aspect_deg_fd = da.from_delayed(aspect_deg, (data.gw.nrows, data.gw.ncols), dtype='float64')

        nodata_samps = da.where((elev.data == elev_nodata) |
                                (data.max(dim='band').data == nodata) |
                                (slope_deg_fd < slope_thresh), 1, 0)

        slope_rad = da.deg2rad(slope_deg_fd)
        aspect_rad = da.deg2rad(aspect_deg_fd)

        # Convert degrees to radians
        solar_za = da.deg2rad(solar_za.squeeze().data * angle_scale)
        solar_az = da.deg2rad(solar_az.squeeze().data * angle_scale)

        cos_z = da.cos(solar_za)

        # Calculate the illumination angle
        il = da.cos(slope_rad) * cos_z + da.sin(slope_rad) * da.sin(solar_za) * da.cos(solar_az - aspect_rad)

        sr_adj = list()
        for band in data.band.values.tolist():

            if method == 'c':

                sr_adj.append(self._method_c(data.sel(band=band).data,
                                             il,
                                             cos_z,
                                             nodata_samps,
                                             min_samples,
                                             n_jobs,
                                             robust,
                                             band_coeffs,
                                             band))

            else:

                sr_adj.append(self._method_empirical_rotation(data.sel(band=band).data,
                                                              il,
                                                              cos_z,
                                                              nodata_samps,
                                                              min_samples,
                                                              n_jobs,
                                                              robust,
                                                              band_coeffs,
                                                              band))

        adj_data = xr.DataArray(data=da.concatenate(sr_adj).reshape((data.gw.nbands,
                                                                     data.gw.nrows,
                                                                     data.gw.ncols)),
                                coords={'band': data.band.values.tolist(),
                                        'y': data.y.values,
                                        'x': data.x.values},
                                dims=('band', 'y', 'x'),
                                attrs=data.attrs)

        attrs['calibration'] = 'Topographic-adjusted'
        attrs['nodata'] = nodata
        attrs['drange'] = (0, 1)

        adj_data.attrs = attrs

        return adj_data
Example #27
    def get_reflectance(self, sun_zenith, sat_zenith, azidiff, bandname, redband=None):
        """Get the reflectance from the three sun-sat angles"""
        # Get wavelength in nm for band:
        if isinstance(bandname, float):
            LOG.warning('A wavelength is provided instead of band name - ' +
                        'disregard the relative spectral responses and assume ' +
                        'it is the effective wavelength: %f (micro meter)', bandname)
            wvl = bandname * 1000.0
        else:
            wvl = self.get_effective_wavelength(bandname)
            if wvl is None:
                LOG.error("Can't get effective wavelength for band %s on platform %s and sensor %s",
                          str(bandname), self.platform_name, self.sensor)
                return None
            else:
                wvl = wvl * 1000.0

        rayl, wvl_coord, azid_coord, satz_sec_coord, sunz_sec_coord = \
            self.get_reflectance_lut()

        # force dask arrays
        compute = False
        if HAVE_DASK and not isinstance(sun_zenith, Array):
            compute = True
            sun_zenith = from_array(sun_zenith, chunks=sun_zenith.shape)
            sat_zenith = from_array(sat_zenith, chunks=sat_zenith.shape)
            azidiff = from_array(azidiff, chunks=azidiff.shape)
            if redband is not None:
                redband = from_array(redband, chunks=redband.shape)

        clip_angle = rad2deg(arccos(1. / sunz_sec_coord.max()))
        sun_zenith = clip(sun_zenith, 0, clip_angle)
        sunzsec = 1. / cos(deg2rad(sun_zenith))
        clip_angle = rad2deg(arccos(1. / satz_sec_coord.max()))
        sat_zenith = clip(sat_zenith, 0, clip_angle)
        satzsec = 1. / cos(deg2rad(sat_zenith))
        shape = sun_zenith.shape

        if not(wvl_coord.min() < wvl < wvl_coord.max()):
            LOG.warning(
                "Effective wavelength for band %s outside 400-800 nm range!",
                str(bandname))
            LOG.info(
                "Set the rayleigh/aerosol reflectance contribution to zero!")
            if HAVE_DASK:
                chunks = sun_zenith.chunks if redband is None \
                    else redband.chunks
                res = zeros(shape, chunks=chunks)
                return res.compute() if compute else res
            else:
                return zeros(shape)

        idx = np.searchsorted(wvl_coord, wvl)
        wvl1 = wvl_coord[idx - 1]
        wvl2 = wvl_coord[idx]

        fac = (wvl2 - wvl) / (wvl2 - wvl1)
        raylwvl = fac * rayl[idx - 1, :, :, :] + (1 - fac) * rayl[idx, :, :, :]
        tic = time.time()

        smin = [sunz_sec_coord[0], azid_coord[0], satz_sec_coord[0]]
        smax = [sunz_sec_coord[-1], azid_coord[-1], satz_sec_coord[-1]]
        orders = [
            len(sunz_sec_coord), len(azid_coord), len(satz_sec_coord)]
        f_3d_grid = atleast_2d(raylwvl.ravel())

        if HAVE_DASK and isinstance(smin[0], Array):
            # compute all of these at the same time before passing to the interpolator
            # otherwise they are computed separately
            smin, smax, orders, f_3d_grid = da.compute(smin, smax, orders, f_3d_grid)
        minterp = MultilinearInterpolator(smin, smax, orders)
        minterp.set_values(f_3d_grid)

        def _do_interp(minterp, sunzsec, azidiff, satzsec):
            interp_points2 = np.vstack((sunzsec.ravel(),
                                        180 - azidiff.ravel(),
                                        satzsec.ravel()))
            res = minterp(interp_points2)
            return res.reshape(sunzsec.shape)

        if HAVE_DASK:
            ipn = map_blocks(_do_interp, minterp, sunzsec, azidiff,
                             satzsec, dtype=raylwvl.dtype,
                             chunks=azidiff.chunks)
        else:
            ipn = _do_interp(minterp, sunzsec, azidiff, satzsec)

        LOG.debug("Time - Interpolation: {0:f}".format(time.time() - tic))

        ipn *= 100
        res = ipn
        if redband is not None:
            res = where(redband < 20., res,
                        (1 - (redband - 20) / 80) * res)

        res = clip(res, 0, 100)
        if compute:
            res = res.compute()
        return res