Example #1
File: gamma.py Project: jwayne/conseval
    def __init__(self, alpha, beta, K):
        """
        Note alpha/beta = mean, alpha/beta^2 = variance
        @param alpha:
            shape parameter
        @param beta:
            inverse scale parameter
        @param K:
            number of bins
        """
        if alpha <= 0:
            raise Exception("alpha = %f <= 0" % alpha)
        if beta <= 0:
            raise Exception("beta = %f <= 0" % beta)
        if K < 1:
            raise Exception("Num bins = %d < 1" % K)

        # find upper boundaries of each bin
        max_prob = gammainc(alpha, self.MAX_RATE)
        bin_prob = max_prob / K
        targets = np.arange(bin_prob, max_prob+bin_prob/2, bin_prob)
        #XXX: not sure why we don't divide targets / beta
        bin_ubounds = gammaincinv(alpha, targets) / beta

        bin_lbounds = np.zeros(K)
        bin_lbounds[1:] = bin_ubounds[:-1]
        tmp = gammainc(alpha+1, bin_ubounds * beta) - gammainc(alpha+1, bin_lbounds * beta)
        bin_rates = tmp * alpha / beta * K

        # Mean rate of each bin
        self.bin_rates = bin_rates
        # Probability mass of each bin
        self.bin_probs = np.zeros(K) + bin_prob
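
A minimal, self-contained sketch of the same equal-probability bin construction, assuming alpha = 2, beta = 2, K = 4 and a stand-in MAX_RATE (the class attribute referenced above is not shown in the snippet):

import numpy as np
from scipy.special import gammainc, gammaincinv

alpha, beta, K = 2.0, 2.0, 4           # hypothetical shape, rate, and bin count
MAX_RATE = 10.0                        # stand-in for the MAX_RATE class attribute

max_prob = gammainc(alpha, MAX_RATE)   # probability mass below the rate cap
bin_prob = max_prob / K
targets = np.arange(bin_prob, max_prob + bin_prob / 2, bin_prob)
bin_ubounds = gammaincinv(alpha, targets) / beta

bin_lbounds = np.zeros(K)
bin_lbounds[1:] = bin_ubounds[:-1]
bin_rates = (gammainc(alpha + 1, bin_ubounds * beta)
             - gammainc(alpha + 1, bin_lbounds * beta)) * alpha / beta * K

print(bin_ubounds)                     # each bin carries probability mass bin_prob
print(bin_rates.mean())                # ~ alpha / beta, up to the truncation at MAX_RATE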
Example #2
    def ppf(self, x):
        """
        Computes the percent point function of the distribution at the point(s)
        x. It is defined as the inverse of the CDF. y = ppf(x) can be
        interpreted as the argument y for which the value of cdf(y) is equal
        to x. In other words, the random variable y is the point on the
        distribution at which the CDF evaluates to x.

        Parameters
        ----------
        x: array, dtype=float, shape=(m x n), bounds=(0,1)
            The value(s) at which the user would like the ppf evaluated.
            If an array is passed in, the ppf is evaluated at every point
            in the array and an array of the same size is returned.

        Returns
        -------
        ppf: array, dtype=float, shape=(m x n)
            The ppf at each point in x.
        """
        if (x <= 0).any() or (x >= 1).any():
            raise ValueError(
                "all values in x must be between 0 and 1, \
                             exclusive"
            )
        ppf = 1.0 / gammaincinv(self.alpha, 1 - x)

        return ppf
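
The formula in this method is the percent point function of an inverse-gamma distribution with unit scale; a quick hedged cross-check against scipy.stats (alpha = 3 is an assumed value):

import numpy as np
from scipy.special import gammaincinv
from scipy.stats import invgamma

alpha = 3.0                                      # hypothetical shape parameter
x = np.array([0.1, 0.5, 0.9])

ppf = 1.0 / gammaincinv(alpha, 1 - x)            # same expression as in the method above
print(np.allclose(ppf, invgamma.ppf(x, alpha)))  # True: inverse-gamma ppf with scale 1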
Example #3
File: rand.py Project: khasinski/csb
def truncated_gamma(shape=None, alpha=1., beta=1., x_min=None, x_max=None):
    """
    Generate random variates from a lower- and upper-bounded gamma distribution.

    @param shape: shape of the random sample
    @param alpha: shape parameter (alpha > 0.)
    @param beta:  rate (inverse scale) parameter (beta > 0.)
    @param x_min: lower bound of variate
    @param x_max: upper bound of variate    
    @return: random variates of the bounded gamma distribution
    """
    from scipy.special import gammainc, gammaincinv
    from numpy.random import gamma
    from numpy import inf

    if x_min is None and x_max is None:
        return gamma(alpha, 1 / beta, shape)
    elif x_min is None:
        x_min = 0.
    elif x_max is None:
        x_max = inf
        
    x_min = max(0., x_min)
    x_max = min(1e300, x_max)

    a = gammainc(alpha, beta * x_min)
    b = gammainc(alpha, beta * x_max)

    return probability_transform(shape,
                                 lambda x, alpha=alpha: gammaincinv(alpha, x),
                                 a, b) / beta
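
probability_transform is defined elsewhere in the source module; the same truncated sampling can be sketched with plain NumPy/SciPy inverse-CDF sampling (note that beta acts as a rate here, matching the code above; the parameter values are assumed):

import numpy as np
from scipy.special import gammainc, gammaincinv

def truncated_gamma_sketch(shape, alpha=1.0, beta=1.0, x_min=0.0, x_max=np.inf):
    a = gammainc(alpha, beta * x_min)           # CDF value at the lower bound
    b = gammainc(alpha, beta * x_max)           # CDF value at the upper bound
    u = np.random.uniform(a, b, size=shape)     # uniforms restricted to [F(x_min), F(x_max)]
    return gammaincinv(alpha, u) / beta         # map back through the inverse CDF

samples = truncated_gamma_sketch(1000, alpha=2.0, beta=1.5, x_min=0.5, x_max=3.0)
print(samples.min(), samples.max())             # all draws lie inside [0.5, 3.0]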
Example #4
def ep_rvs(mu=0, alpha=1, beta=1, size=1):

    u = uniform.rvs(loc=0, scale=1, size=size)
    z = 2 * np.abs(u - 1. / 2)
    z = gammaincinv(1. / beta, z)
    y = mu + np.sign(u - 1. / 2) * alpha * z**(1. / beta)
    return y
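
This is inverse-CDF sampling of an exponential-power (generalized normal) variate; a hedged check against scipy.stats.gennorm with assumed parameters:

import numpy as np
from scipy.special import gammaincinv
from scipy.stats import uniform, gennorm

mu, alpha, beta = 0.0, 1.0, 1.5                 # hypothetical location, scale, shape
u = uniform.rvs(loc=0, scale=1, size=5)
z = gammaincinv(1. / beta, 2 * np.abs(u - 0.5))
y = mu + np.sign(u - 0.5) * alpha * z ** (1. / beta)
print(np.allclose(y, gennorm.ppf(u, beta, loc=mu, scale=alpha)))  # True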
Example #5
  def _testCompareToExplicitDerivative(self, dtype):
    """Compare to the explicit reparameterization derivative.

    Verifies that the computed derivative satisfies
    dsample / dalpha = d igammainv(alpha, u) / dalpha,
    where u = igamma(alpha, sample).

    Args:
      dtype: TensorFlow dtype to perform the computations in.
    """
    delta = 1e-3
    np_dtype = dtype.as_numpy_dtype
    try:
      from scipy import misc  # pylint: disable=g-import-not-at-top
      from scipy import special  # pylint: disable=g-import-not-at-top

      alpha_val = np.logspace(-2, 3, dtype=np_dtype)
      alpha = constant_op.constant(alpha_val)
      sample = random_ops.random_gamma([], alpha, np_dtype(1.0), dtype=dtype)
      actual = gradients_impl.gradients(sample, alpha)[0]

      (sample_val, actual_val) = self.evaluate((sample, actual))

      u = special.gammainc(alpha_val, sample_val)
      expected_val = misc.derivative(
          lambda alpha_prime: special.gammaincinv(alpha_prime, u),
          alpha_val, dx=delta * alpha_val)

      self.assertAllClose(actual_val, expected_val, rtol=1e-3, atol=1e-3)
    except ImportError as e:
      tf_logging.warn("Cannot use special functions in a test: %s" % str(e))
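
The identity the test relies on, u = igamma(alpha, sample) and sample = igammainv(alpha, u), can be checked directly with SciPy; a small sketch with an assumed alpha:

import numpy as np
from scipy.special import gammainc, gammaincinv

alpha = 2.5                                     # hypothetical shape value
x = np.random.gamma(alpha, size=5)
u = gammainc(alpha, x)                          # u = igamma(alpha, sample), as in the test
print(np.allclose(gammaincinv(alpha, u), x))    # True: gammaincinv inverts gammainc in x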
Example #6
def inverse_gamma_cdf(p,k,theta,offset):
    """
    Inverse gamma cumulative distribution function.
    """

    x = gammaincinv(k,p)
    x = (x * theta) + offset

    return x
Example #7
 def bn_exact(n):
     """
     Computes :math:`b_n` exactly for the current sersic index, using
     incomplete gamma functions.
     """
     from scipy.special import gammaincinv
     
     n = float(n) #sometimes 0d array gets passed in
     return gammaincinv(2*n,0.5)
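
The defining property of b_n is that half of the integrated Sersic light falls inside the effective radius, i.e. gammainc(2n, b_n) = 0.5; a quick check for n = 4 (an assumed value, the de Vaucouleurs profile):

from scipy.special import gammainc, gammaincinv

n = 4.0                          # hypothetical Sersic index
b_n = gammaincinv(2 * n, 0.5)
print(b_n)                       # ~7.669
print(gammainc(2 * n, b_n))      # 0.5: half of the total light lies inside r_e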
Example #8
File: test_ufig.py Project: adityacp/hope
def test_ufig_gal_intrinsic():
    try:
        from scipy.special import gammaincinv
    except ImportError:
        pytest.skip("Scipy is not available")

    n = np.uint32(1.88 * np.float64(np.int64(1) << 32) / 10.)
    sersicLTable = np.uint32(n >> np.uint32(32 - 9))
    sersicBTable = np.float32(n - (sersicLTable << np.uint32(32 - 9))) / np.float32(1 << (32 - 9))

    # gamma lookup has 1<<9, 512 elements, and a more precise fitt on 0-1/(1<<3) with 1<<11, 2048 elements
    radiusTable = np.empty(((1 << 9) + 1, (1 << (11 + 1)) + 1, ), dtype=np.float32)
    radiusTable[0][0:(1 << 11)] = (np.power(gammaincinv(2e-15, np.float64(range(0, 1 << 11)) / np.float64(1 << 11)), 1e-15) / 1e-15).astype(np.float32)
    radiusTable[0][(1 << 11):(1 << (11 + 1))] = (np.power(gammaincinv(2e-15, 1. - 1. / np.float64(1 << 3) + np.float64(range(0, 1 << 11)) / np.float64(1 << (11 + 3))), 1e-15) / 1e-15).astype(np.float32)
    radiusTable[0][1 << 11] = (np.power(gammaincinv(2e-15, (1. - 1e-15) / np.float64(1 << 11)), 1e-15) / 1e-15).astype(np.float32)
    radiusTable[0][1 << (11 + 1)] = (np.power(gammaincinv(2e-15, 1. - 1. / np.float64(1 << 3) + (1. - 1e-15) / np.float64(1 << (11 + 3))), 1e-15) / 1e-15).astype(np.float32)

    # TODO: make only one gamma interpolation instead of two
    for i in range(1, (1 << 9) + 1):
        n = 10. * np.float64(i << (32 - 9)) / (np.int64(1) << 32)
        k = gammaincinv(2. * n, 0.5)
        radiusTable[i][0:(1 << 11)] = (np.power(gammaincinv(2. * n, np.float64(range(0, 1 << 11)) / np.float64(1 << 11)), n) / np.power(k, n)).astype(np.float32)
        radiusTable[i][(1 << 11):(1 << (11 + 1))] = (np.power(gammaincinv(2. * n, 1. - 1. / np.float64(1 << 3) + np.float64(range(0, 1 << 11)) / np.float64(1 << (11 + 3))), n) / np.power(k, n)).astype(np.float32)
        radiusTable[i][1 << 11] = (np.power(gammaincinv(2. * n, (1. - 1e-15) / np.float64(1 << 11)), n) / np.power(k, n)).astype(np.float32)
        radiusTable[i][1 << (11 + 1)] = (np.power(gammaincinv(2. * n, 1. - 1. / np.float64(1 << 3) + (1. - 1e-15) / np.float64(1 << (11 + 3))), n) / np.power(k, n)).astype(np.float32)

    def fkt_intrinsic(rng, sersicLTable, sersicBTable, radiusTable):
        drMaski = rng >> (32 - 3) == (1 << 3) - 1
        drKi = rng >> np.uint32(drMaski * 3)
        drLi = (drKi >> (32 - 11)) + np.uint32(drMaski * (1 << 11))
        drBi = np.float64(drKi & ((1 << (32 - 11)) - 1)) / np.float64(1 << (32 - 11))
        drAi = 1. - drBi

        nLi = sersicLTable
        nBi = sersicBTable
        nAi = 1 - nBi

        return drAi * (nAi * radiusTable[nLi, drLi] + nBi * radiusTable[nLi, drLi + 1]) \
           + drBi * (nAi * radiusTable[nLi + 1, drLi] + nBi * radiusTable[nLi + 1, drLi + 1])

    rngBuffer = np.random.randint(0, 1<<32, size=(1000,)).astype(np.uint32)
    hope.config.optimize = True
    hintrinsic = hope.jit(fkt_intrinsic)

    for rng in rngBuffer:
        dr = fkt_intrinsic(rng, sersicLTable, sersicBTable, radiusTable)
        hdr = hintrinsic(rng, sersicLTable, sersicBTable, radiusTable)
        assert check(dr, hdr)
    hope.config.optimize = False
Example #9
def q_lambda2(y, z, n_k, extremes):
    gam_a = n_k; gam_b = sum(z*y)
    u = np.random.uniform(0, 1, len(n_k))
    F_min, F_max = sp.gammainc(gam_a, gam_b*np.array(extremes))
    lambda_const = F_max-F_min

    lambda2 = sp.gammaincinv(gam_a, F_min+u*lambda_const)/gam_b

    idx = np.where(gam_a == 0)
    if len(idx[0]):  # if any values with gam_a==0
        F_min, F_max = np.log(extremes)
        normC = F_max-F_min
        lambda2[idx] = np.exp(u[idx]*normC+F_min)
    return lambda2
Example #10
    def ppf(self,u):
        '''

        Evaluates the percent point function (i.e. the inverse c.d.f.)
        of the current distribution.

        :param u:  Points at which the p.p.f. will be computed.
        :type u: numpy.array
        :returns:  Data object with the resulting points in the domain of this distribution. 
        :rtype:    natter.DataModule.Data
           
        '''
        q = 1/self.param['p']
        s = self.param['s']

        return Data(sign(u-.5) * s**q *gammaincinv(q,abs(2*u-1))**q,'Function values of the p.p.f of %s' % (self.name,))
Example #11
    def ppf(self,u):
        '''

        Evaluates the percent point function (i.e. the inverse c.d.f.)
        of the current distribution.

        :param u:  Points at which the p.p.f. will be computed.
        :type u: numpy.array
        :returns:  Data object with the resulting points in the domain of this distribution. 
        :rtype:    natter.DataModule.Data
           
        '''
        q = 1/self.param['p']
        s = self.param['s']
        v = array([self.param['a'],self.param['b']])
        v = .5 + 0.5*sign(v)*gammainc(1/self.param['p'],abs(v)**self.param['p'] / self.param['s'])
        dv = v[1]-v[0]
        return Data(sign(dv*u+v[0]-.5) * s**q *gammaincinv(q,abs(2*(dv*u+v[0])-1))**q,'Percentiles of %s' % (self.name,))
Example #12
File: sersic.py Project: johnnygreco/hugs
    def __init__(self, params, calc_params=False):
        """
        Initialize and calculate a bunch of useful quantities.
        """
        self.params = params
        self.I_e = params['I_e']
        self.r_e = params['r_e']
        self.n = params['n']
        self.X0 = params['X0']
        self.Y0 = params['Y0']
        self.PA = params['PA']
        self.ell = params['ell']
        self.q = 1 - self.ell
        self.theta = self.PA+90
        self.b_n = gammaincinv(2.*self.n, 0.5)

        if calc_params:
            self.calc_params()
Example #13
File: factory.py Project: johnnygreco/hugs
def _make_galaxy(pset, bbox_num_reff=10, band='i'):
    """
    Make synthetic Sersic galaxy.

    Parameters
    ----------
    pset : dict, astropy.table.Row
        Sersic parameters. Uses imfit's convention for r_e, n, X0, Y0,
        ell, and PA; mu_0 (central surface brightness) is converted to I_e below.
    bbox_num_reff : int, optional
        Number of r_eff to extend the bounding box.
    band : string, optional
        Photometric band (need for central surface brightness).

    Returns
    -------
    galaxy : ndarray
        Image with synthetic galaxy. 
    """

    # convert mu_0 to I_e and r_e to pixels
    mu_0 = pset['mu_0_' + band.lower()]
    b_n = gammaincinv(2.*pset['n'], 0.5)
    mu_e = mu_0 + 2.5*b_n/np.log(10)
    I_e = (pixscale**2)*10**((zpt-mu_e)/2.5)
    r_e = pset['r_e'] / pixscale
    
    # calculate image shape
    side = 2*int(bbox_num_reff*r_e) + 1
    img_shape = (side, side)

    params = dict(X0=img_shape[1]//2,
                  Y0=img_shape[0]//2,
                  I_e=I_e, 
                  r_e=r_e, 
                  n=pset['n'], 
                  ell=pset['ell'],
                  PA=pset['PA'])

    # generate image with synth
    model = Sersic(params)
    galaxy = model.array(img_shape)

    return galaxy 
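
pixscale and zpt appear to be module-level constants in the source project and are not shown in the snippet; a hedged sketch of the mu_0 -> I_e conversion with assumed values:

import numpy as np
from scipy.special import gammaincinv

pixscale, zpt = 0.168, 27.0      # assumed pixel scale [arcsec/pix] and photometric zero point
mu_0, n = 24.0, 1.0              # assumed central surface brightness [mag/arcsec^2] and index

b_n = gammaincinv(2. * n, 0.5)
mu_e = mu_0 + 2.5 * b_n / np.log(10)                # surface brightness at r_e
I_e = (pixscale ** 2) * 10 ** ((zpt - mu_e) / 2.5)  # intensity at r_e in image counts
print(mu_e, I_e)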
Example #14
def fastfood_params(n, d):
    d0 = d
    n0 = n
    l = int(np.ceil(np.log2(d)))
    d = 2**l
    k = int(np.ceil(n / d))
    n = d * k
    print('d0 =', d0, ', d =', d)
    print('n0 =', n0, ', n =', n)

    B = []
    G = []
    PI = []
    S = []
    for ii in xrange(k):
        B_ii = rng.choice([-1, 1], size=d)
        G_ii = rng.normal(size=d)
        PI_ii = rng.permutation(d)
        print('PI_ii=', PI_ii)

        B.append(B_ii)
        G.append(G_ii)
        PI.append(PI_ii)

        p1 = rng.uniform(size=d)
        p2 = d / 2
#        print('p1 =',p1,'; p2 =',p2)
        T = gammaincinv(p2, p1)
#        print('T1 =',T)
        T = (T * 2) ** (1 / 2)
#        print('T2 =',T)
        s_i = T * norm(G, 'fro')**(-1)
#        print('s_i =', s_i)
        S_ii = s_i
        S.append(S_ii)

    S1 = np.zeros(n)
    for ii in xrange(k):
        S1[ii * d:(ii + 1) * d] = S[ii]

#    print('Shape of B:',len(B),', B[0]:',B[0].shape)

    return FFPara(B, G, PI, S1)
Example #15
 def _ppf(self, q, a, c):
     val1 = special.gammaincinv(a,q)
     val2 = special.gammaincinv(a,1.0-q)
     ic = 1.0/c
     cond = c+0*val1
     return np.where(cond > 0,val1**ic,val2**ic)
Example #16
 def _ppf(self, q, c):
     return np.log(special.gammaincinv(c,q))
Example #17
 def _ppf(self, q, a):
     return special.gammaincinv(a, q)
Example #18
	def random(self):
		c=self.randomObj.random()
		return gammaincinv(self.a,c)
Example #19
File: rfunc.py Project: kes79/pastas
 def get_tmax(self, p, cutoff=None):
     if cutoff is None:
         cutoff = self.cutoff
     return gammaincinv(p[1], cutoff) * p[2]
Example #20
 def _ppf(self, q, nu):
     return numpy.sqrt(1.0 / nu * special.gammaincinv(nu, q))
Example #21
def q_lambda(y, extremes):
    gam_a = len(y); gam_b = sum(y)
    u = np.random.uniform()
    F_min, F_max = sp.gammainc(gam_a, gam_b*np.array(extremes))
    lambda_const = F_max-F_min
    return sp.gammaincinv(gam_a, F_min+u*lambda_const)/gam_b
Example #22
 def _ppf(self, q, a, c):
     val = numpy.where(c > 0, q, 1 - q)
     return special.gammaincinv(a, val)**(1. / c)
Example #23
def find_galaxy_list(skymap_path, log=None):
    # settings:
    config = ConfigParser(inline_comment_prefixes=';')
    config.read('config.ini')
    cat_file = config.get('CATALOG', 'PATH') + config.get(
        'CATALOG', 'NAME') + '.npy'  # galaxy catalog file

    # parameters:
    credzone = config.getfloat(
        'GALAXIES',
        'CREDZONE')  # Localization probability to consider credible
    relaxed_credzone = config.getfloat('GALAXIES', 'RELAXED_CREDZONE')
    nsigmas_in_d = config.getfloat(
        'GALAXIES', 'NSIGMAS_IN_D')  # Sigmas to consider in distance
    relaxed_nsigmas_in_d = config.getfloat('GALAXIES', 'RELAXED_NSIGMAS_IN_D')
    completeness = config.getfloat('GALAXIES', 'COMPLETENESS')
    min_galaxies = config.getfloat(
        'GALAXIES', 'MINGALAXIES')  # minimal number of galaxies to output
    max_galaxies = config.getint(
        'GALAXIES', 'MAXGALAXIES')  # maximal number of galaxies to use

    # magnitude of event in r-band. Values are from Barnes... +-1.5 mag
    minmag = config.getfloat('GALAXIES',
                             'MINMAG')  # Estimated brightest KN abs mag
    maxmag = config.getfloat('GALAXIES',
                             'MAXMAG')  # Estimated faintest KN abs mag
    sensitivity = config.getfloat(
        'GALAXIES', 'SENSITIVITY')  # Estimated faintest app mag we can see

    min_dist_factor = config.getfloat(
        'GALAXIES', 'MINDISTFACTOR'
    )  # reflecting a small chance that the theory is completely wrong and we can still see something

    minL = mag.f_nu_from_magAB(minmag)
    maxL = mag.f_nu_from_magAB(maxmag)

    # Schechter function parameters:
    alpha = config.getfloat('GALAXIES', 'ALPHA')
    MB_star = config.getfloat(
        'GALAXIES', 'MB_STAR'
    )  # random slide from https://www.astro.umd.edu/~richard/ASTRO620/LumFunction-pp.pdf but not really...?

    if log is None:
        log = logging.getLogger(__name__)

    # Read the HEALPix sky map:
    try:
        prob, dist_mu, dist_sigma, dist_norm = hp.read_map(skymap_path,
                                                           field=None,
                                                           verbose=False)
    except Exception as e:
        log.error('Failed to read sky map!')
        send_mail(subject="[GW@Wise] Failed to read LVC sky map",
                  text='''FITS file: {}
                          Exception: {}'''.format(skymap_path, e),
                  log=log)

    # Load the galaxy catalog (glade_id, RA, DEC, distance, Bmag):
    galaxy_cat = np.load(cat_file)
    galaxy_cat = Table(galaxy_cat, names=('ID', 'RA', 'Dec', 'Dist', 'Bmag'))
    galaxy_cat = galaxy_cat[np.where(
        galaxy_cat['Dist'] > 0)]  # remove entries with a negative distance
    galaxy_cat = galaxy_cat[np.where(
        ~np.isnan(galaxy_cat['Bmag']))]  # remove entries with no Bmag

    # Skymap parameters:
    npix = len(prob)
    nside = hp.npix2nside(npix)

    # Convert galaxy WCS (RA, DEC) to spherical coordinates (theta, phi):
    theta = 0.5 * np.pi - np.deg2rad(galaxy_cat['Dec'])
    phi = np.deg2rad(galaxy_cat['RA'])
    d = np.array(galaxy_cat['Dist'])

    # Convert galaxy coordinates to skymap pixels:
    galaxy_pix = hp.ang2pix(nside, theta, phi)

    # Most probable sky location
    theta_maxprob, phi_maxprob = hp.pix2ang(nside, np.argmax(prob))
    ra_maxprob = np.rad2deg(phi_maxprob)
    dec_maxprob = np.rad2deg(0.5 * np.pi - theta_maxprob)

    # Convert to coordinates
    ra_maxprob = Angle(ra_maxprob * u.deg)
    dec_maxprob = Angle(dec_maxprob * u.deg)

    # Find given percent probability zone (default is 99%):
    prob_cutoff = 1
    prob_sum = 0
    npix_credzone = 0

    prob_sorted = np.sort(prob, kind="stable")
    while prob_sum < credzone:
        prob_sum = prob_sum + prob_sorted[-1]
        prob_cutoff = prob_sorted[-1]
        prob_sorted = prob_sorted[:-1]
        npix_credzone = npix_credzone + 1

    # area = npix_credzone * hp.nside2pixarea(nside, degrees=True)

    ####################################################

    # calculate probability for galaxies by the localization map:
    p = prob[galaxy_pix]
    p_dist = dist_norm[galaxy_pix] * norm(dist_mu[galaxy_pix],
                                          dist_sigma[galaxy_pix]).pdf(d)

    # cutoffs - 99% of probability by angles and 3sigma by distance:
    within_dist_idx = np.where(
        np.abs(d - dist_mu[galaxy_pix]) < nsigmas_in_d *
        dist_sigma[galaxy_pix])
    within_credzone_idx = np.where(p >= prob_cutoff)
    within_idx = np.intersect1d(within_credzone_idx, within_dist_idx)

    do_mass_cutoff = True

    # Relax credzone limits if no galaxies are found:
    if within_idx.size == 0:
        while prob_sum < relaxed_credzone:
            if prob_sorted.size == 0:
                break
            prob_sum = prob_sum + prob_sorted[-1]
            prob_cutoff = prob_sorted[-1]
            prob_sorted = prob_sorted[:-1]
            npix_credzone = npix_credzone + 1
        within_dist_idx = np.where(
            np.abs(d - dist_mu[galaxy_pix]) < relaxed_nsigmas_in_d *
            dist_sigma[galaxy_pix])
        within_credzone_idx = np.where(p >= prob_cutoff)
        within_idx = np.intersect1d(within_credzone_idx, within_dist_idx)
        do_mass_cutoff = False

    p = p[within_idx]
    p = (p * (p_dist[within_idx]))  # d**2?

    galaxy_cat = galaxy_cat[within_idx]

    if len(galaxy_cat) == 0:
        log.warning("No galaxies in field!")
        log.warning("99.995% of probability is ",
                    npix_credzone * hp.nside2pixarea(nside, degrees=True),
                    "deg^2")
        log.warning("Peaking at (deg) RA = {}, Dec = {}".format(
            ra_maxprob.to_string(unit=u.hourangle,
                                 sep=':',
                                 precision=2,
                                 pad=True),
            dec_maxprob.to_string(sep=':',
                                  precision=2,
                                  alwayssign=True,
                                  pad=True)))
        return

    # Normalize luminosity to account for mass:
    luminosity = mag.L_nu_from_magAB(galaxy_cat['Bmag'] -
                                     5 * np.log10(galaxy_cat['Dist'] *
                                                  (10**5)))
    luminosity_norm = luminosity / np.sum(luminosity)
    normalization = np.sum(p * luminosity_norm)
    score = p * luminosity_norm / normalization

    # Take 50% of mass:

    # The area under the Schechter function between L=inf and the brightest galaxy in the field:
    missing_piece = gammaincc(
        alpha + 2,
        10**(-(min(galaxy_cat['Bmag'] - 5 * np.log10(galaxy_cat['Dist'] *
                                                     (10**5))) - MB_star) /
             2.5))
    # there are no galaxies brighter than this in the field, so don't count that part of the Schechter function

    while do_mass_cutoff:
        MB_max = MB_star + 2.5 * np.log10(
            gammaincinv(alpha + 2, completeness + missing_piece))

        if (min(galaxy_cat['Bmag'] - 5 * np.log10(galaxy_cat['Dist'] *
                                                  (10**5))) - MB_star) > 0:
            MB_max = 100  # if the brightest galaxy in the field is fainter than the cutoff brightness - don't cut by brightness

        brightest = np.where(
            galaxy_cat['Bmag'] - 5 * np.log10(galaxy_cat['Dist'] *
                                              (10**5)) < MB_max)
        # print MB_max
        if len(brightest[0]) < min_galaxies:
            # Not enough galaxies, allowing fainter galaxies
            if completeness >= 0.9:  # Tried hard enough, just take all of them
                completeness = 1  # Just to be consistent
                do_mass_cutoff = False
            else:
                completeness = (completeness + (1. - completeness) / 2)
        else:  # got enough galaxies
            galaxy_cat = galaxy_cat[brightest]
            p = p[brightest]
            luminosity_norm = luminosity_norm[brightest]
            score = score[brightest]
            do_mass_cutoff = False

    # Account for the distance
    absolute_sensitivity = sensitivity - 5 * np.log10(galaxy_cat['Dist'] *
                                                      (10**5))

    absolute_sensitivity_lum = mag.f_nu_from_magAB(absolute_sensitivity)
    distance_factor = np.zeros(len(galaxy_cat))

    distance_factor[:] = ((maxL - absolute_sensitivity_lum) / (maxL - minL))
    distance_factor[min_dist_factor > (maxL - absolute_sensitivity_lum) /
                    (maxL - minL)] = min_dist_factor
    distance_factor[absolute_sensitivity_lum < minL] = 1
    distance_factor[absolute_sensitivity > maxL] = min_dist_factor

    # Sort galaxies by probability
    ranking_idx = np.argsort(p * luminosity_norm * distance_factor,
                             kind="stable")[::-1]

    # # Count galaxies that constitute 50% of the probability (~0.5*0.98)
    # sum = 0
    # galaxies50per = 0
    # sum_seen = 0
    # while sum < 0.5:
    #     if galaxies50per >= len(ranking_idx):
    #         break
    #     sum = sum + (p[ranking_idx[galaxies50per]] * luminosity_norm[ranking_idx[galaxies50per]]) / float(normalization)
    #     sum_seen = sum_seen + (p[ranking_idx[galaxies50per]] * luminosity_norm[ranking_idx[galaxies50per]] * distance_factor[ranking_idx[galaxies50per]]) / float(normalization)
    #     galaxies50per = galaxies50per + 1
    #
    # # Event statistics:
    # # Ngalaxies_50percent = the number of galaxies consisting 50% of probability (including luminosity but not distance factor)
    # # actual_percentage = usually around 50
    # # seen_percentage = if we include the distance factor - how much do the same galaxies worth
    # # 99percent_area = area of map in [deg^2] consisting 99% (using only the map from LIGO)
    # stats = {"Ngalaxies_50percent": galaxies50per, "actual_percentage": sum*100, "seen_percentage": sum_seen, "99percent_area": area}

    # Limit the maximal number of galaxies to use:
    if len(ranking_idx) > max_galaxies:
        n = max_galaxies
    else:
        n = len(ranking_idx)

    # Create sorted galaxy list (glade_id, RA, DEC, distance(Mpc), Bmag, score, distance factor (between 0-1))
    # The score is normalized so that all the galaxies in the field sum to 1 (before applying luminosity cutoff)
    galaxylist = np.ndarray((n, 7))
    for i in range(ranking_idx.shape[0])[:n]:
        ind = ranking_idx[i]
        galaxylist[i, :] = [
            galaxy_cat[ind]['ID'], galaxy_cat[ind]['RA'],
            galaxy_cat[ind]['Dec'], galaxy_cat[ind]['Dist'],
            galaxy_cat[ind]['Bmag'], score[ind], distance_factor[ind]
        ]

        # Update galaxy table in SQL database:
        lvc_galaxy_dict = {
            'voeventid': '(SELECT MAX(id) from voevent_lvc)',
            'score': score[ind],
            'gladeid': galaxy_cat[ind]['ID']
        }
        mysql_update.insert_values('lvc_galaxies', lvc_galaxy_dict)

    return galaxylist, ra_maxprob, dec_maxprob
Example #24
print(mean2)

median2 = 5.3481
print(median2)
variance2 = k * tetha * tetha
print(variance2)

m = symbols('m')
x = symbols('x')

fx = integrate(((x ** (k-1)) * exp(-x/tetha)/((tetha ** k) * gamma(k))), (x, m, oo))
#print(fx, "\n")



quartile2_1 = ss.gammaincinv(k, 0.25) * tetha
quartile2_2 = ss.gammaincinv(k, 0.5) * tetha
quartile2_3 = ss.gammaincinv(k, 0.75) * tetha
print(quartile2_1, quartile2_2, quartile2_3)
iqr2 = quartile2_3 - quartile2_1
print(iqr2, "\n")

def delta(a, b):
    return a - b

print(delta(mean1, mean2), delta(median1, median2), delta(variance1, variance2), delta(iqr1, iqr2), "\n")

i1 = mean1 - sqrt(variance1/len(rozpodil)) * np.percentile(rozpodil, 3)
i2 = mean1 + sqrt(variance1/len(rozpodil)) * np.percentile(rozpodil, 3)
print(i1, i2, "\n")
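
The quartile expressions above are just the gamma distribution's percent point function; a hedged cross-check against scipy.stats.gamma with assumed shape and scale (`tetha` plays the role of the scale):

import scipy.special as ss
from scipy.stats import gamma as gamma_dist

k, tetha = 2.0, 1.5                                        # assumed shape and scale
quartiles = [ss.gammaincinv(k, q) * tetha for q in (0.25, 0.5, 0.75)]
print(quartiles)
print(gamma_dist.ppf([0.25, 0.5, 0.75], k, scale=tetha))   # same three values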
Example #25
 def _upper(self, a, c):
     cond = c > 0
     val = numpy.where(cond, 1 - 1e-15, 1e-15)
     return special.gammaincinv(a, val)**(1. / c)
Example #26
def SourceProfile(xsource,ysource,source,lens):
      """
      Creates the source-plane profile of the given Source.

      Inputs:
      xsource,ysource:
            Source-plane coordinates, in arcsec, on which to
            calculate the luminosity profile of the source
      
      Source:
            Any supported source-plane object, e.g. a GaussSource
            object. The object will contain all the necessary
            parameters to create the profile.

      Lens:
            Any supported Lens object, e.g. an SIELens. We only need
            this because, in the case of single lenses, the source
            position is defined as offset from the lens centroid. If
            there is more than one lens, or if the source is unlensed,
            the source position is defined **relative to the field 
            center, aka (0,0) coordinates**.
            

      Returns:
      I:
            The luminosity profile of the given Source. Has same
            shape as xsource and ysource. Note: returned image has
            units of flux / arcsec^2 (or whatever the x,y units are),
            so to properly normalize, must multiply by pixel area. This
            isn't done here since the lensing means the pixels likely
            aren't on a uniform grid.
      """
      
      lens = list(np.array([lens]).flatten())

      # First case: a circular Gaussian source.
      if source.__class__.__name__=='GaussSource':
            sigma = source.width['value']
            amp   = source.flux['value']/(2.*np.pi*sigma**2.)
            if source.lensed:# and len(lens)==1:
                  xs = source.xoff['value'] + lens[0].x['value']
                  ys = source.yoff['value'] + lens[0].y['value']
            else:
                  xs = source.xoff['value']
                  ys = source.yoff['value']
            
            return amp * np.exp(-0.5 * (np.sqrt((xsource-xs)**2.+(ysource-ys)**2.)/sigma)**2.)

      elif source.__class__.__name__=='SersicSource':
            if source.lensed:# and len(lens)==1:
                  xs = source.xoff['value'] + lens[0].x['value']
                  ys = source.yoff['value'] + lens[0].y['value']
            else:
                  xs = source.xoff['value']
                  ys = source.yoff['value']
            PA, ar = source.PA['value']*deg2rad, source.axisratio['value']
            majax, index = source.majax['value'], source.index['value']
            dX = (xsource-xs)*np.cos(PA) + (ysource-ys)*np.sin(PA)
            dY = (-(xsource-xs)*np.sin(PA) + (ysource-ys)*np.cos(PA))/ar
            R = np.sqrt(dX**2. + dY**2.)
            
            # Calculate b_n, to make reff enclose half the light; this approx from Ciotti&Bertin99
            # This approximation good to 1 in 10^4 for n > 0.36; for smaller n it gets worse rapidly!!
            #bn = 2*index - 1./3. + 4./(405*index) + 46./(25515*index**2) + 131./(1148175*index**3) - 2194697./(30690717750*index**4)
            # Note, now just calculating directly because everyone's scipy
            # should be sufficiently modern.
            bn = gammaincinv(2. * index, 0.5)
            
            # Backing out from the integral to R=inf of a general sersic profile
            Ieff = source.flux['value'] * bn**(2*index) / (2*np.pi*majax**2 * ar * np.exp(bn) * index * gamma(2*index))
            
            return Ieff * np.exp(-bn*((R/majax)**(1./index)-1.))
      
      elif source.__class__.__name__=='PointSource':
            if source.lensed:# and len(lens)==1:
                  #xs = source.xoff['value'] + lens[0].x['value']
                  #ys = source.yoff['value'] + lens[0].y['value']
                  raise ValueError("Lensed point sources not working yet... try a "\
                   "gaussian with small width instead...")
            else:
                  xs = source.xoff['value']
                  ys = source.yoff['value']
                  
            yloc = np.abs(xsource[0,:] - xs).argmin()
            xloc = np.abs(ysource[:,0] - ys).argmin()
            
            m = np.zeros(xsource.shape)
            m[xloc,yloc] += source.flux['value']/(xsource[0,1]-xsource[0,0])**2.
            
            return m
            
      
      else: raise ValueError("So far only GaussSource, SersicSource, and "\
            "PointSource objects supported...")
Example #27
def _fit_eeg_distribution(X,
                          min_clean_fraction=None,
                          max_dropout_fraction=None,
                          quantile_range=None,
                          step_sizes=None,
                          beta_range=None):
    """ Estimate the mean and standard deviation of clean EEG from contaminated data

    This function estimates the mean and standard deviation of clean EEG from a sample of amplitude
    values (that have preferably been computed over short windows) that may include a large fraction
    of contaminated samples. The clean EEG is assumed to represent a generalized Gaussian component in
    a mixture with near-arbitrary artifact components. By default, at least 25% (min_clean_fraction) of
    the data must be clean EEG, and the rest can be contaminated. No more than 10%
    (max_dropout_fraction) of the data is allowed to come from contamination that causes lower-than-EEG
    amplitudes (e.g., sensor unplugged). There are no restrictions on artifacts causing
    larger-than-EEG amplitudes, i.e., virtually anything is handled (with the exception of a very
    unlikely type of distribution that combines with the clean EEG samples into a larger symmetric
    generalized Gaussian peak and thereby "fools" the estimator). The default parameters should be
    fine for a wide range of settings but may be adapted to accommodate special circumstances.

    The method works by fitting a truncated generalized Gaussian whose parameters are constrained by
    min_clean_fraction, max_dropout_fraction, quantile_range, and beta_range. The alpha and beta parameters
    of the gen. Gaussian are also returned. The fit is performed by a grid search that always finds a
    close-to-optimal solution if the above assumptions are fulfilled.

    Parameters
    ----------
    X : ndarray, shape (n_samples,)
        vector of amplitude values of EEG, possibly containing artifacts
        (coming from single samples or windowed averages)

    min_clean_fraction : float (default: 0.25)
        Minimum fraction of values in X that needs to be clean

    max_dropout_fraction : float (default: 0.1)
        Maximum fraction of values in X that can be subject to
        signal dropouts (e.g., sensor unplugged)

    quantile_range : ndarray, shape (2,) (default: [0.022 0.6])
        Quantile range [lower,upper] of the truncated generalized Gaussian distribution
        that shall be fit to the EEG contents

    step_sizes : ndarray, shape (2,) (default: [0.01 0.01])
        Step size of the grid search; the first value is the stepping of the lower bound
        (which essentially steps over any dropout samples), and the second value
        is the stepping over possible scales (i.e., clean-data quantiles)

    beta_range : ndarray, shape (n_points,) (default: np.arange(1.70, 3.51, 0.15))
        Range that the clean EEG distribution's shape parameter beta may take

    Returns
    -------
    Mu : float
        estimated mean of the clean EEG distribution

    Sigma : float
        estimated standard deviation of the clean EEG distribution

    Alpha : float
        estimated scale parameter of the generalized Gaussian clean EEG distribution (optional)

    Beta : float
        estimated shape parameter of the generalized Gaussian clean EEG distribution (optional)

    """

    # sanity checks
    if len(X.shape) > 1:
        raise ValueError('X needs to be a 1D ndarray.')

    # default parameters
    if min_clean_fraction is None:
        min_clean_fraction = 0.25
    if max_dropout_fraction is None:
        max_dropout_fraction = 0.1
    if quantile_range is None:
        quantile_range = np.array([0.022, 0.6])
    if step_sizes is None:
        step_sizes = np.array([0.01, 0.01])
    if beta_range is None:
        beta_range = np.arange(1.7, 3.51, 0.15)

    # check valid parameters
    n = len(X)
    quantile_range = np.array(quantile_range)
    step_sizes = np.array(step_sizes)
    beta_range = np.array(beta_range)

    if not len(quantile_range) == 2:
        raise ValueError('quantile_range needs to be a 2-elements vector.')
    if any(quantile_range > 1) | any(quantile_range < 0):
        raise ValueError('Unreasonable quantile_range.')
    if any(step_sizes < 0.0001) | any(step_sizes > 0.1):
        raise ValueError('Unreasonable step sizes.')
    if any(step_sizes * n < 1):
        raise ValueError(
            f"Step sizes compared to actual number of samples available, step_sizes * n should be "
            f"greater than 1 (current value={step_sizes * n}. More training data required."
        )
    if any(beta_range >= 7) | any(beta_range <= 1):
        raise ValueError('Unreasonable shape range.')

    # sort data for quantiles
    X = np.sort(X)

    # compute z bounds for the truncated standard generalized Gaussian pdf and pdf rescaler for each beta
    zbounds = []
    rescale = []
    for k, b in enumerate(beta_range):
        zbounds.append(
            np.sign(quantile_range - 0.5) * gammaincinv(
                (1 / b),
                np.sign(quantile_range - 0.5) *
                (2 * quantile_range - 1))**(1 / b))
        rescale.append(b / (2 * gamma(1 / b)))

    # determine the quantile-dependent limits for the grid search and convert everything in samples

    # we can generally skip the tail below the lower quantile
    lower_min = int(round(min(quantile_range) * n))
    # maximum width in samples is the fit interval if all data is clean
    max_width = int(round(n * np.diff(quantile_range)[0]))
    # minimum width in samples of the fit interval, as fraction of data
    min_width = int(round(min_clean_fraction * n *
                          np.diff(quantile_range)[0]))  #
    max_dropout_fraction_n = int(round(max_dropout_fraction * n))
    step_sizes_n = np.round(step_sizes * n).astype(int)
    assert any(
        step_sizes_n >= 1)  # should be catched earlier but double-checking

    # get matrix of shifted data ranges
    indx = np.arange(lower_min, lower_min + max_dropout_fraction_n + 0.5,
                     step_sizes_n[0]).astype(int)  # epochs start
    assert indx.dtype == "int"

    range_ind = np.arange(0, max_width)  # interval indices
    Xs = np.zeros(
        (max_width, len(indx)))  # preload entire quantile interval matrix
    for k, i in enumerate(indx):
        Xs[:, k] = X[i + range_ind]  # build each quantile interval

    X1 = Xs[0, :]
    Xs = Xs - X1  # subtract baseline value for each interval (starting at 0)

    # gridsearch to find optimal fitting coefficient based on given parameters
    opt_val = float("inf")
    opt_lu = float("inf")
    opt_bounds = float("inf")
    opt_beta = float("inf")
    gridsearch_val = np.arange(max_width - 1, min_width,
                               -step_sizes_n[0]).astype(int)

    for m in gridsearch_val:  # gridsearch for different quantile interval
        # scale and bin the data in the intervals
        nbins = int(round(3 * np.log2(1 + m / 2))) + 1  # scale interval
        H = Xs[range(m), :] * nbins / Xs[m - 1, :]  # scale data bins
        binscounts = np.zeros((nbins, H.shape[1]))  # init bincounts
        for k in range(H.shape[1]):
            binscounts[:, k], _ = np.histogram(H[:, k], nbins)

        logq = np.log(binscounts + 0.01)  # return log(bincounts) in intervals

        # for each shape value...
        for k, beta in enumerate(beta_range):
            bounds = zbounds[k]

            # evaluate truncated generalized Gaussian pdf at bin centers
            x = bounds[0] + np.linspace(0.5, (nbins - 0.5),
                                        num=nbins) / nbins * np.diff(bounds)[0]
            p = np.exp(-np.abs(x)**beta) * rescale[k]
            p = p / np.sum(p)

            # calc KL divergences for the specific interval
            kl = np.sum(p *
                        (np.log(p) - np.transpose(logq)), axis=1) + np.log(m)

            # update optimal parameters
            idx = np.argmin(kl)
            if kl[idx] < opt_val:
                opt_val = kl[idx]
                opt_beta = beta
                opt_bounds = bounds
                opt_lu = [X1[idx], X1[idx] + Xs[m, idx]]

    # recover distribution parameters at optimum
    alpha = (opt_lu[1] - opt_lu[0]) / np.diff(opt_bounds)[0]
    mu = opt_lu[0] - opt_bounds[0] * alpha
    beta = opt_beta

    # calculate the distribution's standard deviation from alpha and beta
    sig = np.sqrt((alpha**2) * gamma(3 / beta) / gamma(1 / beta))

    return mu, sig, alpha, beta
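
The zbounds computed in the loop above are the quantiles of a standard generalized Gaussian proportional to exp(-|x|**beta); a small hedged check for one assumed beta value:

import numpy as np
from scipy.special import gammainc, gammaincinv

beta = 2.0                                     # assumed shape (beta = 2 is the Gaussian case)
quantile_range = np.array([0.022, 0.6])

sgn = np.sign(quantile_range - 0.5)
z = sgn * gammaincinv(1 / beta, sgn * (2 * quantile_range - 1)) ** (1 / beta)

# CDF of the standard generalized Gaussian exp(-|x|**beta) evaluated at z
cdf = 0.5 + 0.5 * np.sign(z) * gammainc(1 / beta, np.abs(z) ** beta)
print(np.allclose(cdf, quantile_range))        # True: z are the requested quantiles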
Example #28
 def expected_grad(s, c, r):
     u = sp_special.gammainc(c, s * r)
     delta = 1e-4
     return sp_misc.derivative(
         lambda x: sp_special.gammaincinv(x, u), c, dx=delta * c) / r
Example #29
 def _ppf(self, q, a, b):
     return b/gammaincinv(a,1-q)
Example #30
def sernorm(n):
	return gammaincinv(2.*n, 0.5)
Example #31
d0, n0 = d, n
l = T.ceil(T.log2(d))  # TODO cast to int
d = 2**l
k = T.ceil(n/d)  # TODO cast to int
n = d*k
# generate parameter 'matrices'
B = rng.choice([-1, 1], size=(k, d))
G = rng.normal(size=(k, d), dtype=np.float64)
PI = np.array([rng.permutation(d) for _ in xrange(k)]).T
S = np.empty((k*d, 1), dtype=np.float64)
# generate scaling matrix, S
for i in xrange(k):
    for j in xrange(d):
        p1 = rng.uniform(size=d)
        p2 = d/2
        Tmp = gammaincinv(p2, p1)
        Tmp = T.sqrt(2*Tmp)
        s_ij = Tmp * norm(G, 'fro')**(-1)
        S[i+j] = s_ij
fastfood_params = function([n, d], [B, G, PI, S])


# {{{ Fastfood for kernel }}}
# params
X = T.dmatrix('X')  # COLUMNS are input patterns (d0 dims, m samples)
B = T.dmatrix('B')
G = T.dmatrix('G')
PI = T.dmatrix('PI')
S = T.dmatrix('S')
para = [B, G, PI, S]
sgm = T.dscalar('sgm')
Example #32
File: Sersic.py Project: vimarian/psfMC
 def kappa(index):
     """
     Sersic profile exponential scaling factor, called either kappa or b_n
     Ciotti & Bertin 1999, A&A, 352, 447 Eqn 5, exact formula!
     """
     return gammaincinv(2 * index, 0.5)
Example #33
def sigmaz(t, y, err, nseg, diagplot=False):
    """Compute sigma_z from lists of measurement times and values.

    Input:
    ------
    t: array of floats
      The measurement times (days).
    y: array of floats
      The measurement values (seconds).
    err: array of floats (1D)
      Error bars of the measurements (seconds).
    nseg : array of ints
      In each iteration, the total time span of the measurements will be split into Nseg segments. This array contains all the values of Nseg we want to use. 
    diagplot: bool
      Make a diagnostic plot of the polynomial fit to the full set of measurements.

    Output:
    -------
    sz_corr : array of floats
      Values of bias-corrected sigma-z for different segment length tau.
    szerr_lower : array of floats
      Lower error bars for the sigma-z values.
    szerr_upper : array of floats
      Upper error bars for the sigma-z values.
    tz : array of floats
      The values of the segment lengths, tau (days), for which sigma-z was calculated.
    nsegments : array of ints
      How many segments of each recorded length passed all criteria for being used when calculating tau and sigma-z statistics.
    """

    # The length of the output arrays depends on how many segments meet our criteria of more than 6 points, and longer than T/sqrt(2)
    sz = []
    tz = []
    ngood = []  # How many good segments went into each tz,sz point

    toas = t
    toaerr = err
    toares = y

    # Total span of the TOAs
    durationday = (toas[-1] - toas[0])  # days
    durationsec = durationday * 86400.0  # seconds

    #The amount of wiggle room for the TOAs to fall on the other side of the segment range and still be included. It's really only supposed to account for roundoff error.  We have no reason to expect TOAs to fall on the border except for the first and last TOAs of the whole batch, so I don't believe we're in danger of double counting any TOAs.
    wiggle = 1e-5

    # Polynomial order to fit (a cubic may fail to produce a good fit for a long TOA span for pulsars with a lot of red noise; it fails for the NANOGrav data set on B1937+21).
    polyorder = 3

    for iseg in nseg:
        # For each duration of length durationday/iseg compute sz.
        dur_oneseg = durationday / iseg  # how long is one segment
        ngoodsegs = 0  # Reset the counter for good segments
        C3sqr = 0  # This will accumulate values of C3sqr
        C3un_sum = 0  # This will accumulate the sum of 1/C3_sigma^2 to normalize the C3^2 weights at the end

        n_sing_matrix = 0  # how many segments make polyfit fail with a singular matrix error
        n_few_points = 0  # how many segments have too few points
        n_short_dataspan = 0  # in how many segments the points are clustered within too small a portion of the selected time span
        n_C3_neg_var = 0  # for how many segments the C3 coefficient has a negative variance in the covariance matrix
        for jseg in range(
                0, iseg):  # Now loop through each segment of this length
            # for iseq > 1 there are multiple segments we need to analyze
            segrange = (toas[0] + dur_oneseg * jseg,
                        toas[0] + dur_oneseg * (jseg + 1))
            centertime = (segrange[1] + segrange[0]
                          ) / 2.0  # Midpoint of observation interval
            # Fit the polynomial using only the toas in the interval
            desind = np.where((toas > (segrange[0] - wiggle))
                              & (toas < (segrange[1] + wiggle)))

            if (
                    np.size(desind)
            ) > polyorder + 3:  # if cov. matrix needed for error estimates on fitted params
                #if (np.size(desind))>polyorder: # if cov. matrix not needed
                dataspan = np.max(toas[desind]) - np.min(toas[desind])
            else:
                n_few_points = n_few_points + 1
                continue

            # Matsakis recommends segment be longer than dur_oneseg/sqrt(2)
            if (dataspan <=
                (dur_oneseg / np.sqrt(2))):  #xAL added this criterion
                n_short_dataspan = n_short_dataspan + 1
                continue
            else:
                res = toares[desind]
                toaerrs = toaerr[desind]

                try:
                    #NOTE: polyfit needs 1/sigma, not 1/sigma^2 weights. Times and residuals need to be in the same units, here are in seconds
                    p, pcov = np.polyfit((toas[desind] - centertime) * 86400.0,
                                         res.astype(np.float),
                                         polyorder,
                                         cov=True,
                                         full=False,
                                         w=np.abs(1. / toaerrs))
                    #p = np.polyfit((toas[desind]-centertime)*86400.0,
                    #    res.astype(np.float),polyorder, cov=False, full=False, w = np.abs(1./toaerrs) )
                except:
                    #print('Polyfit failed!')
                    #traceback.print_exc()
                    n_sing_matrix = n_sing_matrix + 1
                    continue

                # Get C3 coefficient uncertainty from the covariance matrix
                C3variance = np.diag(pcov)[-4]
                if C3variance < 0:
                    n_C3_neg_var = n_C3_neg_var + 1
                    #print('C3variance = %e' % C3variance)
                    continue

                C3un = np.sqrt(C3variance)
                C3un_sum = C3un_sum + 1.0 / C3un**2  # for normalizing weights at the end
                C3sqr = C3sqr + p[-4]**2 / C3un**2
                #C3sqr=C3sqr+p[0]**2    # Accumulate to eventually find avg C3^2
                ngoodsegs += 1  # the number of good segments (with at least 6 TOAs in them)

            # Plot data and fit for case where the full set of resids is treated as one segment
            if (iseg == 1 and diagplot):
                fig = plt.figure()
                ax = fig.add_subplot(1, 1, 1)
                toas_secs = (toas[desind] - centertime) * 86400.0
                ax.plot(toas[desind], res.astype(np.float) * 1.e6, 'ko')
                ax.errorbar(toas[desind],
                            res.astype(np.float) * 1.e6,
                            yerr=toaerr[desind] * 1.e6,
                            fmt='none',
                            color='k',
                            capsize=2.0)
                ax.plot(toas[desind], np.polyval(p, toas_secs) * 1.e6, 'r')
                ax.set_xlabel('MJD')
                ax.set_ylabel('Res (us)')
                plt.title('Order-%d polynomial fit to full TOA set' %
                          polyorder)
                #plt.savefig("sigmaz-diagnostic.png", dpi=300, format='png', bbox_inches='tight')

        print(
            "Divided data into %d segments of length %.1f days. Number of good segments: %d"
            % (iseg, dur_oneseg, ngoodsegs))
        if n_few_points > 0:
            print('--->Segments with too few TOAs: %d' % n_few_points)
        if n_short_dataspan > 0:
            print('--->Segments with too short TOA span: %d' %
                  n_short_dataspan)
        if n_sing_matrix > 0:
            print('--->Segments causing singular matrix error in polyfit: %d' %
                  n_sing_matrix)
        if n_C3_neg_var > 0:
            print('--->Segments with C3 variance <0: %d' % n_C3_neg_var)

        if ngoodsegs != 0:
            #C3sqr=C3sqr/ngoodsegs # unweighted average
            C3sqr = C3sqr / C3un_sum  # average weighted by the uncertainties in fitted C3 values
            sz.append((dur_oneseg * 86400)**2 * np.sqrt(C3sqr) /
                      (2.0 * np.sqrt(5.0)))  # sigma_z formula
            tz.append(dur_oneseg)  #days
            ngood.append(ngoodsegs)

    # Sigma-z bias correction and error bars
    nsegments = np.array(ngood)
    x16 = np.divide(gammaincinv(0.5 * nsegments, 0.16), 0.5 * nsegments)
    x50 = np.divide(gammaincinv(0.5 * nsegments, 0.50), 0.5 * nsegments)
    x84 = np.divide(gammaincinv(0.5 * nsegments, 0.84), 0.5 * nsegments)
    sz_corr = np.divide(sz, np.sqrt(x50))
    szerr_upper = np.multiply(sz_corr, np.sqrt(np.divide(x50, x16)) - 1.0)
    szerr_lower = np.multiply(sz_corr, 1.0 - np.sqrt(np.divide(x50, x84)))

    return sz_corr, szerr_lower, szerr_upper, tz, nsegments
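
The x16/x50/x84 factors used for the bias correction are scaled chi-squared quantiles with nsegments degrees of freedom; a hedged check for an assumed segment count:

import numpy as np
from scipy.special import gammaincinv
from scipy.stats import chi2

nseg = 8                                          # assumed number of good segments
p = np.array([0.16, 0.50, 0.84])
x = gammaincinv(0.5 * nseg, p) / (0.5 * nseg)     # the bias-correction factors used above
print(np.allclose(x, chi2.ppf(p, nseg) / nseg))   # True: scaled chi-squared quantiles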
Example #34
	def __init__(self, data, params, data_handler, variables, weights=None):

		# store mandatory parameters in class
		self._dt           		= params['time_step'   ]	# time-step of the data
		self._nt           		= params['n_snapshots' ]	# number of time-frames
		self._xdim         		= params['n_space_dims'] 	# number of spatial dimensions
		self._nv           		= params['n_variables' ]	# number of variables
		self._n_DFT        		= int(params['n_DFT'   ])	# number of DFT (per block)

		# store optional parameters in class
		self._overlap      		= params.get('overlap', 0)			  	 # percentage overlap
		self._mean_type    		= params.get('mean_type', 'longtime')	 # type of mean
		self._normalize_weights = params.get('normalize_weights', False) # normalize weights if required
		self._normalize_data 	= params.get('normalize_data', False)    # normalize data by variance if required
		self._n_modes_save      = params.get('n_modes_save', 1e10)       # default is all (large number)
		self._conf_level		= params.get('conf_level', 0.95) 	     # what confidence level to use fo eigs
		self._reuse_blocks 		= params.get('reuse_blocks', False)      # reuse blocks if present
		self._savefft           = params.get('savefft', False) 		     # save fft block if required
		self._save_dir          = params.get('savedir', os.path.join(CWD, 'results')) # where to save data

		# type of data management
		# - data_handler: read type online
		# - not data_handler: data is entirely pre-loaded
		self._data_handler = data_handler
		self._variables = variables
		if data_handler:
			self._data = data
			X = data_handler(self._data, t_0=0, t_end=1, variables=variables)
			if self._nv == 1 and (X.ndim != self._xdim + 2):
				X = X[...,np.newaxis]
		else:
			def data_handler(data, t_0, t_end, variables):
				if t_0 > t_end:
					raise ValueError('`t_0` cannot be greater than `t_end`.')
				elif t_0 >= self._nt:
					raise ValueError('`t_0` cannot be greater or equal to time dimension.')
				elif t_0 == t_end:
					ti = np.arange(t_0, t_0+1)
					d = data[[t_0],...,:]
				else:
					ti = np.arange(t_0, t_end)
					d = data[ti,...,:]
				return d
			self._data_handler = data_handler
			self._data = np.array(data)
			X = self._data_handler(self._data, t_0=0, t_end=0, variables=self._variables)
			if self._nv == 1 and (self._data.ndim != self._xdim + 2):
				X = X[...,np.newaxis]
				self._data = self._data[...,np.newaxis]

		# get data dimensions and store in class
		self._nx     = X[0,...,0].size
		self._dim    = X.ndim
		self._shape  = X.shape
		self._xdim   = X[0,...,0].ndim
		self._xshape = X[0,...,0].shape

		# check weights
		if isinstance(weights, dict):
			self._weights = weights['weights']
			self._weights_name = weights['weights_name']
			if np.size(self._weights) != int(self.nx * self.nv):
				raise ValueError(
					'parameter ``weights`` must have the '
					'same size as flattened data spatial '
					'dimensions, that is: ', int(self.nx * self.nv))
		else:
			self._weights = np.ones(self._xshape+(self._nv,))
			self._weights_name = 'uniform'
			warnings.warn(
				'Parameter `weights` not equal to a `numpy.ndarray`. '
				'Using default uniform weighting')

		# normalize weigths if required
		if self._normalize_weights:
			self._weights = utils_weights.apply_normalization(
				data=self._data,
				weights=self._weights,
				n_variables=self._nv,
				method='variance')

		# flatten weights to number of spatial point
		try:
			self._weights = np.reshape(
				self._weights, [int(self._nx*self._nv), 1])
		except:
			raise ValueError(
				'parameter ``weights`` must be cast into '
				'1d array with dimension equal to flattened '
				'spatial dimension of data.')

		# Determine whether data is real-valued or complex-valued
		# to decide on one- or two-sided spectrum from data
		self._isrealx = np.isreal(X[0]).all()

		# get default spectral estimation parameters and options
		# define default spectral estimation parameters
		if isinstance(self._n_DFT, int):
			self._window = SPOD_base._hamming_window(self._n_DFT)
			self._window_name = 'hamming'
		else:
			self._n_DFT = int(2**(np.floor(np.log2(self.nt / 10))))
			self._window = SPOD_base._hamming_window(self._n_DFT)
			self._window_name = 'hamming'
			warnings.warn(
				'Parameter `n_DFT` not equal to an integer. '
				'Using default `n_DFT` = %d' % self._n_DFT)

		# define block overlap
		self._n_overlap = int(np.ceil(self._n_DFT * self._overlap / 100))
		if self._n_overlap > self._n_DFT - 1:
			raise ValueError('Overlap is too large.')

		# define number of blocks
		self._n_blocks = \
			int(np.floor((self.nt - self._n_overlap) \
			/ (self._n_DFT - self._n_overlap)))

		# set number of modes to save
		if self._n_modes_save > self._n_blocks:
			self._n_modes_save = self._n_blocks

		# test feasibility
		if (self._n_DFT < 4) or (self._n_blocks < 2):
			raise ValueError(
				'Spectral estimation parameters not meaningful.')

		# apply mean
		self.select_mean()

		# get frequency axis
		self.get_freq_axis()

		# determine correction for FFT window gain
		self._winWeight = 1 / np.mean(self._window)
		self._window = self._window.reshape(self._window.shape[0], 1)

		# get default for confidence interval
		self._xi2_upper = 2 * sc.gammaincinv(self._n_blocks, 1 - self._conf_level)
		self._xi2_lower = 2 * sc.gammaincinv(self._n_blocks,     self._conf_level)
		self._eigs_c = np.zeros([self._n_freq,self._n_blocks,2], dtype='complex_')

		# create folder to save results
		self._save_dir_blocks = os.path.join(self._save_dir, \
			'nfft'+str(self._n_DFT)+'_novlp'+str(self._n_overlap) \
			+'_nblks'+str(self._n_blocks))
		if not os.path.exists(self._save_dir_blocks):
			os.makedirs(self._save_dir_blocks)

		# compute approx problem size (assuming double)
		self._pb_size = self._nt * self._nx * self._nv * 8 * BYTE_TO_GB

		# print parameters to the screen
		self.print_parameters()
Example #35
File: 2b.py Project: jpdiazp/Tarea4
import scipy as sy
import matplotlib.pyplot as plt
from scipy.special import gamma
from scipy.special import gammaincinv


def chi2(x,dx):
	return (1 / (2*gamma(dx/2)) * (x/2)**(dx/2-1) * sy.exp(-x/2))
 
rand=sy.random.random(size=10000)
 
dev=120
lista1,lista2=[],[]
for i in rand:
	lista1.append(2*gammaincinv(dev/2,i))

#plt.figure("1")



x=sy.linspace(min(lista1),max(lista1),100)

for i in range(len(x)):
	lista2.append(10000*(chi2(x[i],dev)))

plt.hist(lista1, 140, facecolor='green', alpha=0.5)
plt.plot(x,lista2,'-r')	

plt.xlabel("X")
plt.ylabel("$\phi_{\mu,\sigma^2}(X)$")
plt.show()
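
The transformation 2*gammaincinv(dev/2, u) applied to uniform u is inverse-CDF sampling of a chi-squared variate with dev degrees of freedom; a brief hedged check:

import numpy as np
from scipy.special import gammaincinv
from scipy.stats import chi2

dev = 120
u = np.random.random(10000)
samples = 2 * gammaincinv(dev / 2, u)        # inverse-CDF draws of a chi^2_dev variate
print(samples.mean(), chi2.mean(dev))        # both ~ 120
print(samples.var(), chi2.var(dev))          # both ~ 240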
Example #36
 def _ppf(self, q, a, c):
     val1 = special.gammaincinv(a,q)
     val2 = special.gammaincinv(a,1.0-q)
     ic = 1.0/c
     cond = c+0*val1
     return np.where(cond > 0,val1**ic,val2**ic)
Example #37
def income_cumulative_prob_inverse(gender, race, age):
	(inc_med, inc_mean) = income_table.get_income(gender, race, age)
	k = (.2*(inc_med/inc_mean) + .8)/(3-3*(inc_med/inc_mean))
	theta = inc_mean/k
	func = lambda prob: gammaincinv(k, prob)*theta
	return func
Example #38
 def _ppf(self, q, df):
     return np.sqrt(2*special.gammaincinv(df*0.5,q))
Example #39
def sernorm(n):
	return gammaincinv(2.*n, 0.5)
Example #40
def find_galaxy_list(map_path, airmass_threshold = airmass_thresholdp, completeness = completenessp, credzone = 0.99):

    #loading the map:
    prob, distmu, distsigma, distnorm = hp.read_map(map_path, field=[0, 1, 2, 3], verbose=False)

    #loading the galaxy catalog. this one contains only RA, DEC, distance, Bmag
    galax = np.load("glade_RA_DEC.npy")


    #map parameters:
    npix = len(prob)
    nside = hp.npix2nside(npix)

    #galaxy parameters(RA, DEC to theta, phi):
    galax = (galax[np.where(galax[:,2]>0),:])[0] #no distance<0

    theta = 0.5 * np.pi - np.pi*(galax[:,1])/180
    phi = np.deg2rad(galax[:,0])
    d = np.array(galax[:,2])


    #converting galaxy coordinates to map pixels:
    ipix = hp.ang2pix(nside, theta, phi)

    #finding given percent probability zone(default is 99%):
    ####################################################

    probcutoff = 1
    probsum = 0

    sortedprob = np.sort(prob)
    while probsum<credzone:
        probsum = probsum+sortedprob[-1]
        probcutoff = sortedprob[-1]
        sortedprob = sortedprob[:-1]

    ####################################################


    #calculating probability for galaxies by the localization map:
    p = prob[ipix]
    distp = (norm(distmu[ipix], distsigma[ipix]).pdf(d) * distnorm[ipix])# * d**2)#/(norm(distmu[ipix], distsigma[ipix]).pdf(distmu[ipix]) * distnorm[ipix] * distmu[ipix]**2)


    # cutoffs: the 99% probability region on the sky and 3 sigma in distance:
    inddistance = np.where(np.abs(d-distmu[ipix])<nsigmas_in_d*distsigma[ipix])
    indcredzone = np.where(p>=probcutoff)

    galax = galax[np.intersect1d(indcredzone,inddistance)]
    ipix = ipix[np.intersect1d(indcredzone,inddistance)]
    p = p[np.intersect1d(indcredzone,inddistance)]
    p = (p*(distp[np.intersect1d(indcredzone,inddistance)]))##d**2?

    # normalized luminosity to account for mass:
    luminosity = mag.L_nu_from_magAB(galax[:, 3] - 5 * np.log10(galax[:, 2] * (10 ** 5)))
    luminosityNorm = luminosity / np.sum(luminosity)
    normalization = np.sum(p * luminosityNorm)

    # Taking 50% of the mass (missingpiece is the area under the Schechter function between L=inf and the brightest galaxy in the field).
    # If the brightest galaxy in the field is fainter than the Schechter-function cutoff, no cutoff is made.
    # While the number of galaxies in the field is smaller than minGalaxies, fainter galaxies are allowed, until all of them are taken.

    missingpiece = gammaincc(alpha + 2, 10 ** (-(min(galax[:,3]-5*np.log10(galax[:,2]*(10**5))) - MB_star) / 2.5)) ##no galaxies brighter than this in the field- so don't count that part of the Schechter function

    doMassCuttoff = True
    while doMassCuttoff:
        MB_max = MB_star + 2.5 * np.log10(gammaincinv(alpha + 2, completeness+missingpiece))

        if (min(galax[:,3]-5*np.log10(galax[:,2]*(10**5))) - MB_star)>0: # if the brightest galaxy in the field is fainter than the cutoff brightness, don't cut by brightness
            MB_max = 100

        brightest = np.where(galax[:,3]-5*np.log10(galax[:,2]*(10**5))<MB_max)
        # print MB_max
        if len(brightest[0])<minGalaxies:
            if completeness>=0.9: #tried hard enough. just take all of them
                completeness = 1 # just to be consistent.
                doMassCuttoff = False
            else:
                completeness = (completeness + (1. - completeness) / 2)
        else: #got enough galaxies
            galax = galax[brightest]
            p = p[brightest]
            luminosityNorm = luminosityNorm[brightest]
            doMassCuttoff = False

    #including observation constraints. (uses code in observational_const.py)
    indices = get_observables(galax, airmass_threshold)
    haleakalaObservable = indices['indHal']
    sidingSpringObservable = indices['indSS']



    # sorting galaxies by probability
    ii = np.argsort(p*luminosityNorm)[::-1]
    
    #### counting galaxies that constitute 50% of the probability (~0.5*0.98)
    sum = 0
    galaxies50per = 0
    observable50per = 0 #how many of the galaxies in the top 50% of probability are observable.
    enough = True
    while sum<0.5:
        if galaxies50per>= len(ii):
            enough = False
            break
        sum = sum + (p[ii[galaxies50per]]*luminosityNorm[ii[galaxies50per]])/float(normalization)
        if ii[galaxies50per] in haleakalaObservable or ii[galaxies50per] in sidingSpringObservable:
            observable50per = observable50per + 1
        galaxies50per = galaxies50per+1
    ####


    # creating a sorted galaxy list containing the top ngalaxtoshow galaxies; each entry is (RA, DEC, distance (Mpc), Bmag, score)
    # the score is normalized so that all the galaxies in the field sum to 1 (before the luminosity cutoff)
    galaxylist = np.ndarray((ngalaxtoshow, 5))

    ###uncomment to include only observable galaxies.
    # i=0
    # n=0
    # while i<ngalaxtoshow and n<galax.shape[0]:
    #     ind = ii[n]
    #     if ind in haleakalaObservable or ind in sidingSpringObservable:
    #         galaxylist[i,:] = [galax[ind, 0], galax[ind, 1], galax[ind,2], galax[ind,3], (p*luminosityNorm/normalization)[ind]]
    #         i = i+1
    #     n = n+1

    for i in range(ngalaxtoshow):
        ind = ii[i]
        galaxylist[i, :] = [galax[ind, 0], galax[ind, 1], galax[ind, 2], galax[ind, 3],
                            (p * luminosityNorm / normalization)[ind]]

    return galaxylist#[:i,:]#uncomment to include only observable galaxies.

    ##########################################################################################################

#to call function with commandline:
# print find_galaxy_list(sys.argv[1])
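The luminosity cutoff inside find_galaxy_list inverts the incomplete-gamma form of the Schechter function. A standalone restatement of that single step, with assumed values for alpha, MB_star and completeness:

import numpy as np
from scipy.special import gammaincinv

alpha, MB_star = -1.07, -20.7  # assumed B-band Schechter parameters
completeness = 0.5             # fraction of the integrated luminosity to keep

MB_max = MB_star + 2.5 * np.log10(gammaincinv(alpha + 2, completeness))  # same cutoff expression as used above
print(MB_max)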
Example #41
 def _ppf(self, q, nu):
     return np.sqrt(1.0/nu*special.gammaincinv(nu,q))
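This matches the Nakagami distribution's percent point function. A quick check against scipy.stats.nakagami (arbitrary nu):

import numpy as np
from scipy import special
from scipy.stats import nakagami

nu = 1.5
q = np.array([0.1, 0.5, 0.9])
assert np.allclose(np.sqrt(1.0 / nu * special.gammaincinv(nu, q)), nakagami.ppf(q, nu))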
Example #42
File: catalog.py, Project: johnnygreco/hugs
def synthetic_sersics(mu_range=[23, 28], r_eff_range=[3, 15], 
                      n_range=[0.3, 1.5], ell_range=[0., 0.65], 
                      theta_range=[0, 180], nsynths=100, random_state=None,  
                      master_band='g', mu_type="central", 
                      g_i=0.6, g_r=0.4, **kwargs):
    """
    Generate catalog of Sersic galaxies.
    

    Notes
    -----
    full sample:
    median g-i = 0.64
    median g-r = 0.43

    reds:
    median g-i = 0.82
    median g-r = 0.56

    blues:
    median g-i = 0.47
    median g-r = 0.32
    """

    size = int(nsynths)

    rng = check_random_state(random_state)
    r_eff = rng.uniform(*r_eff_range, size=size)    
    sersic_n = rng.uniform(*n_range, size=size)    
    ell = rng.uniform(*ell_range, size=size)    
    theta = rng.uniform(*theta_range, size=size)    
    
    b_a = 1 - ell
    b_n = gammaincinv(2.*sersic_n, 0.5)
    f_n = gamma(2*sersic_n)*sersic_n*np.exp(b_n)/b_n**(2*sersic_n)

    mu = rng.uniform(*mu_range, size=size)    

    if mu_type=='central':
        mu_0 = mu
        mu_e = mu_0 + 2.5*b_n/np.log(10)
        mu_e_ave = mu_e - 2.5*np.log10(f_n)
    elif mu_type=='average':
        mu_e_ave = mu
        mu_e = mu_e_ave + 2.5*np.log10(f_n)
        mu_0 = mu_e - 2.5*b_n/np.log(10) 
    else:
        raise Exception(mu_type+' is not a valid mu type')

    r_circ = r_eff*np.sqrt(b_a)
    A_eff = np.pi*r_circ**2
    m_tot = mu_e_ave - 2.5*np.log10(2*A_eff)    

    cat = {'m_' + master_band: m_tot, 
            'mu_0_' + master_band: mu_0, 
            'mu_e_ave_' + master_band: mu_e_ave}

    if master_band == 'g':
        # write i band
        cat['m_i'] = m_tot - g_i
        cat['mu_0_i'] = mu_0 - g_i
        cat['mu_e_ave_i'] = mu_e_ave - g_i
        # write r band
        cat['m_r'] = m_tot - g_r
        cat['mu_0_r'] = mu_0 - g_r
        cat['mu_e_ave_r'] = mu_e_ave - g_r
    elif master_band == 'i':
        # write g band
        cat['m_g'] = m_tot + g_i
        cat['mu_0_g'] = mu_0 + g_i
        cat['mu_e_ave_g'] = mu_e_ave + g_i
        # write r band
        cat['m_r'] = m_tot + g_i - g_r
        cat['mu_0_r'] = mu_0 + g_i - g_r
        cat['mu_e_ave_r'] = mu_e_ave + g_i - g_r
    else:
        raise Exception('master_band must be g or i')

    cat = Table(cat)
   
    cat['theta'] = theta
    cat['PA'] = theta - 90
    cat['r_e'] = r_eff
    cat['ell'] = 1 - b_a 
    cat['n'] = sersic_n
    cat['g-i'] = g_i
    cat['g-r'] = g_r

    return cat
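The surface-brightness bookkeeping in synthetic_sersics rests on the b_n and f_n constants. A minimal restatement of those relations for a single assumed profile (mu_0 = 24 is an arbitrary example value):

import numpy as np
from scipy.special import gamma, gammaincinv

n = 1.0                                   # Sersic index
b_n = gammaincinv(2. * n, 0.5)
f_n = gamma(2 * n) * n * np.exp(b_n) / b_n**(2 * n)

mu_0 = 24.0                               # assumed central surface brightness
mu_e = mu_0 + 2.5 * b_n / np.log(10)      # surface brightness at the effective radius
mu_e_ave = mu_e - 2.5 * np.log10(f_n)     # mean surface brightness within the effective radius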
Example #43
 def _ppf(self, q, df):
     return np.sqrt(2*special.gammaincinv(df*0.5,q))
Example #44
 def _ppf(self, q, a):
     return special.gammaincinv(a, q)
Example #45
File: rfunc.py, Project: pastas/pasta
 def get_tmax(self, p, cutoff=None):
     if cutoff is None:
         cutoff = self.cutoff
     return gammaincinv(p[1], cutoff) * p[2]
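get_tmax appears to return the time at which a gamma-shaped step response reaches the fraction `cutoff` of its final value, assuming p = [A, n, a] as in pastas' Gamma response. A small illustration with assumed parameter values:

from scipy.special import gammainc, gammaincinv

n, a = 1.5, 10.0     # assumed shape and scale of the gamma response
cutoff = 0.999
tmax = gammaincinv(n, cutoff) * a
assert abs(gammainc(n, tmax / a) - cutoff) < 1e-10  # the normalized step response has reached 99.9%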
Example #46
    transnoise=numpy.kron(accelnoise**2*numpy.array([(timestep**2/4.0,timestep/2.0),
                                                     (timestep/2.0,1.0)]),numpy.eye(numdim))

    # Create the measurement gain and noise.
    measgain=numpy.kron(numpy.array([1.0,0.0]),numpy.eye(numdim))
    measnoise=numpy.kron(numpy.array([obsnoise**2]),numpy.eye(numdim))

# Define the clutter density.
area=imwidth*imheight
clutterdens=lambda x:1.0/float(area)

# Instantiate the filter.
filt=gmphd.filt(initweight,initmean,initcovar,transgain,transnoise,measgain,measnoise,
                clutterdens,birthrate,clutterrate,survprob,detecprob)

scale=special.gammaincinv(numdim,0.99)
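# (scale above is the 99% quantile of a gamma(numdim) variate, i.e. half the 99% chi-square
#  quantile with 2*numdim degrees of freedom; presumably used to size confidence/gating regions)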

# Create a figure and a pair of axes.
fig,axes=pyplot.subplots()
axes.invert_yaxis()

for frame in range(min(names.keys()),max(names.keys())):
    try:

        # Perform a prediction-update step.
        filt.pred()
        if frame in detections:
            obs=numpy.array(detections[frame],dtype=float).transpose()
            obs[:2,:]+=obs[2:,:]/2.0
            filt.update(obs[:numdim,:],numpy.spacing(1.0))
        filt.prune(truncthres=truncthres,
Example #47
 def _ppf(self, q, a, b, s):
     """
     """
     return gammaincinv(s, q*gammainc(s, b) + gammainc(s, a))
Example #48
 def _ppf(self, q, c):
     return np.log(special.gammaincinv(c,q))
Example #49
 def _ppf(self, q, zm, a, b):
     k = (a + 1) / b
     t = sc.gammainccinv(k, 0.5)
     return zm * (sc.gammaincinv(k, q) / t)**(1 / b)
Example #50
File: catalog.py, Project: tahumada/gwemopt
def get_catalog(params, map_struct):
    if not os.path.isdir(params["catalogDir"]):
        os.makedirs(params["catalogDir"])

    catalogFile = os.path.join(params["catalogDir"],
                               "%s.hdf5" % params["galaxy_catalog"])

    """AB Magnitude zero point."""
    MAB0 = -2.5 * np.log10(3631.e-23)
    pc_cm = 3.08568025e18
    const = 4. * np.pi * (10. * pc_cm)**2.

    if params["galaxy_catalog"] == "2MRS":
        if not os.path.isfile(catalogFile):
            import astropy.constants as c

            cat, = Vizier.get_catalogs('J/ApJS/199/26/table3')

            ra, dec = cat["RAJ2000"], cat["DEJ2000"]
            cz = cat["cz"]
            magk = cat["Ktmag"]

            z = (u.Quantity(cat['cz']) / c.c).to(u.dimensionless_unscaled)

            completeness = 0.5
            alpha = -1.0
            MK_star = -23.55
            MK_max = MK_star + 2.5 * np.log10(gammaincinv(alpha + 2,
                                                          completeness))
            MK = magk - cosmo.distmod(z)
            idx = (z > 0) & (MK < MK_max)

            ra, dec = ra[idx], dec[idx]
            z = z[idx]
            magk = magk[idx]

            distmpc = cosmo.luminosity_distance(z).to('Mpc').value

            with h5py.File(catalogFile, 'w') as f:
                f.create_dataset('ra', data=ra)
                f.create_dataset('dec', data=dec)
                f.create_dataset('z', data=z)
                f.create_dataset('magk', data=magk)
                f.create_dataset('distmpc', data=distmpc)

        else:
            with h5py.File(catalogFile, 'r') as f:
                ra, dec = f['ra'][:], f['dec'][:]
                z = f['z'][:]
                magk = f['magk'][:]
                distmpc = f['distmpc'][:]
        r = distmpc * 1.0
        mag = magk * 1.0

    elif params["galaxy_catalog"] == "GLADE":
        if not os.path.isfile(catalogFile):
            cat, = Vizier.get_catalogs('VII/281/glade2')

            ra, dec = cat["RAJ2000"], cat["DEJ2000"]
            distmpc, z = cat["Dist"], cat["z"]
            magb, magk = cat["Bmag"], cat["Kmag"]
            # Keep track of galaxy identifier
            GWGC, PGC, HyperLEDA = cat["GWGC"], cat["PGC"], cat["HyperLEDA"]
            _2MASS, SDSS = cat["_2MASS"], cat["SDSS-DR12"]

            idx = np.where(distmpc >= 0)[0]
            ra, dec = ra[idx], dec[idx]
            distmpc, z = distmpc[idx], z[idx]
            magb, magk = magb[idx], magk[idx]
            GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx]
            _2MASS, SDSS = _2MASS[idx], SDSS[idx]

            with h5py.File(catalogFile, 'w') as f:
                f.create_dataset('ra', data=ra)
                f.create_dataset('dec', data=dec)
                f.create_dataset('distmpc', data=distmpc)
                f.create_dataset('magb', data=magb)
                f.create_dataset('magk', data=magk)
                f.create_dataset('z', data=z)
                # Add galaxy identifier
                f.create_dataset('GWGC', data=GWGC)
                f.create_dataset('PGC', data=PGC)
                f.create_dataset('HyperLEDA', data=HyperLEDA)
                f.create_dataset('2MASS', data=_2MASS)
                f.create_dataset('SDSS', data=SDSS)

        else:
            with h5py.File(catalogFile, 'r') as f:
                ra, dec = f['ra'][:], f['dec'][:]
                distmpc, z = f['distmpc'][:], f['z'][:]
                magb, magk = f['magb'][:], f['magk'][:]
                GWGC, PGC, _2MASS = f['GWGC'][:], f['PGC'][:], f['2MASS'][:]
                HyperLEDA, SDSS = f['HyperLEDA'][:], f['SDSS'][:]
                # Convert bytestring to unicode
                GWGC = GWGC.astype('U')
                PGC = PGC.astype('U')
                HyperLEDA = HyperLEDA.astype('U')
                _2MASS = _2MASS.astype('U')
                SDSS = SDSS.astype('U')

        # Keep only galaxies with finite B mag when using it in the grade
        if params["galaxy_grade"] == "S":
            idx = np.where(~np.isnan(magb))[0]
            ra, dec, distmpc = ra[idx], dec[idx], distmpc[idx]
            magb, magk = magb[idx], magk[idx]
            GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx]
            _2MASS, SDSS = _2MASS[idx], SDSS[idx]

        r = distmpc * 1.0
        mag = magb * 1.0

    elif params["galaxy_catalog"] == "CLU":
        if not os.path.isfile(catalogFile):
            raise ValueError("Please add %s." % catalogFile)

        with h5py.File(catalogFile, 'r') as f:
            name = f['name'][:]
            ra, dec = f['ra'][:], f['dec'][:]
            sfr_fuv, mstar = f['sfr_fuv'][:], f['mstar'][:]
            distmpc, magb = f['distmpc'][:], f['magb'][:]
            a, b2a, pa = f['a'][:], f['b2a'][:], f['pa'][:]
            btc = f['btc'][:]

        idx = np.where(distmpc >= 0)[0]
        ra, dec = ra[idx], dec[idx]
        sfr_fuv, mstar = sfr_fuv[idx], mstar[idx]
        distmpc, magb = distmpc[idx], magb[idx]
        a, b2a, pa = a[idx], b2a[idx], pa[idx]
        btc = btc[idx]

        idx = np.where(~np.isnan(magb))[0]
        ra, dec = ra[idx], dec[idx]
        sfr_fuv, mstar = sfr_fuv[idx], mstar[idx]
        distmpc, magb = distmpc[idx], magb[idx]
        a, b2a, pa = a[idx], b2a[idx], pa[idx]
        btc = btc[idx]

        z = -1*np.ones(distmpc.shape)
        r = distmpc * 1.0
        mag = magb * 1.0

    L_nu = const * 10.**((mag + MAB0)/(-2.5))
    L_nu = np.log10(L_nu)
    L_nu = L_nu**params["catalog_n"]
    Slum = L_nu / np.sum(L_nu)

    mlim, M_KNmin, M_KNmax = 22, -17, -12
    L_KNmin = const * 10.**((M_KNmin + MAB0)/(-2.5))
    L_KNmax = const * 10.**((M_KNmax + MAB0)/(-2.5))

    Llim = 4. * np.pi * (r * 1e6 * pc_cm)**2. * 10.**((mlim + MAB0)/(-2.5))
    Sdet = (L_KNmax-Llim)/(L_KNmax-L_KNmin)
    Sdet[Sdet < 0.01] = 0.01
    Sdet[Sdet > 1.0] = 1.0

    n, cl = params["powerlaw_n"], params["powerlaw_cl"]
    dist_exp = params["powerlaw_dist_exp"]

    prob_scaled = copy.deepcopy(map_struct["prob"])
    prob_sorted = np.sort(prob_scaled)[::-1]
    prob_indexes = np.argsort(prob_scaled)[::-1]
    prob_cumsum = np.cumsum(prob_sorted)
    index = np.argmin(np.abs(prob_cumsum - cl)) + 1
    prob_scaled[prob_indexes[index:]] = 0.0
    prob_scaled = prob_scaled**n

    theta = 0.5 * np.pi - dec * 2 * np.pi / 360.0
    phi = ra * 2 * np.pi / 360.0
    ipix = hp.ang2pix(map_struct["nside"], ra, dec, lonlat=True)

    if "distnorm" in map_struct:
        if map_struct["distnorm"] is not None:
            
            # create a mask to cut at 3 sigma in distance
            mask = np.zeros(len(r))

            condition_indexer = np.where( (r < (map_struct["distmu"][ipix] + (3*map_struct["distsigma"][ipix]))) & (r > (map_struct["distmu"][ipix] - (3*map_struct["distsigma"][ipix])) )) 
            mask[condition_indexer] = 1

            Sloc = prob_scaled[ipix] * (map_struct["distnorm"][ipix] *
                                        norm(map_struct["distmu"][ipix],
                                        map_struct["distsigma"][ipix]).pdf(r))**params["powerlaw_dist_exp"] / map_struct["pixarea"]
            

            # multiply Sloc by 1 or 0 according to the 3 sigma condition
            Sloc = Sloc*mask

        else:
            Sloc = copy.copy(prob_scaled[ipix])
    else:
        Sloc = copy.copy(prob_scaled[ipix])

    # Set nan values to zero
    Sloc[np.isnan(Sloc)] = 0

    S = Sloc*Slum*Sdet
    prob = np.zeros(map_struct["prob"].shape)
    if params["galaxy_grade"] == "Sloc":
        prob[ipix] = prob[ipix] + Sloc
        grade = Sloc
    elif params["galaxy_grade"] == "S":
        prob[ipix] = prob[ipix] + S
        grade = S

    prob = prob / np.sum(prob)

    map_struct['prob_catalog'] = prob
    if params["doUseCatalog"]:
        map_struct['prob'] = prob

    idx = np.where(~np.isnan(grade))[0]
    grade = grade[idx]
    ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx]
    distmpc, z = distmpc[idx], z[idx]
    if params["galaxy_catalog"] == "GLADE":
        GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx]
        _2MASS, SDSS = _2MASS[idx], SDSS[idx]
    
    """
    Sthresh = np.max(grade)*0.01
    idx = np.where(grade >= Sthresh)[0]
    grade = grade[idx]
    ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx]
    distmpc, z = distmpc[idx], z[idx]
    if params["galaxy_catalog"] == "GLADE":
        GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx]
        _2MASS, SDSS = _2MASS[idx], SDSS[idx]
    """
    
    idx = np.argsort(grade)[::-1]
    grade = grade[idx]

    ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx]
    distmpc, z = distmpc[idx], z[idx]
    if params["galaxy_catalog"] == "GLADE":
        GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx]
        _2MASS, SDSS = _2MASS[idx], SDSS[idx]

    if len(ra) > 1000:
        print('Cutting catalog to top 1000 galaxies...')
        idx = np.arange(1000).astype(int)
        ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx]
        distmpc, z = distmpc[idx], z[idx]
        if params["galaxy_catalog"] == "GLADE":
            GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx]
            _2MASS, SDSS = _2MASS[idx], SDSS[idx]

    catalog_struct = {}
    catalog_struct["ra"] = ra
    catalog_struct["dec"] = dec
    catalog_struct["Sloc"] = Sloc
    catalog_struct["S"] = S

    if params["writeCatalog"]:
        catalogfile = os.path.join(params["outputDir"], 'catalog.csv')
        fid = open(catalogfile, 'w')
        cnt = 1
        if params["galaxy_catalog"] == "GLADE":
            fid.write("id, RAJ2000, DEJ2000, Sloc, S, Dist, z, GWGC, PGC, HyperLEDA, 2MASS, SDSS\n")
            for a, b, c, d, e, f, g, h, i, j, k in zip(ra, dec, Sloc, S, distmpc, z, GWGC, PGC, HyperLEDA, _2MASS, SDSS):
                fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.4f, %.4f, %s, %s, %s, %s, %s\n" % (cnt, a, b, c, d, e, f, g, h, i, j, k))
                cnt = cnt + 1
        else:
            fid.write("id, RAJ2000, DEJ2000, Sloc, S, Dist, z\n")
            for a, b, c, d, e, f in zip(ra, dec, Sloc, S, distmpc, z):
                fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.4f, %.4f\n" % (cnt, a, b, c, d, e, f))
                cnt = cnt + 1

        fid.close()

    return map_struct, catalog_struct