def compute_normalising_constant_bivariatefisher(self,
                                                   specific_neurons=None):
    '''
            Depending on neuron_sigma, we have a different normalising constant per neuron.

            The full formula, for kappa_3 != 0, is more complex and is not used for now:

            Z = 4 pi^2 \sum_{m=0}^\infty binom(2m, m) (\kappa_3^2 / (4 \kappa_1 \kappa_2))^m I_m(\kappa_1) I_m(\kappa_2)

            Here, for \kappa_3 = 0, only the m = 0 term remains, so Z = 4 pi^2 I_0(\kappa_1) I_0(\kappa_2).
        '''

    if self.normalisation is None:
      self.normalisation = np.zeros(self.M)
      self.normalisation_fisher_all = np.zeros((self.M, self.R))
      self.normalisation_gauss_all = np.zeros((self.M, self.R))

    # The normalising constant
    #   Overflows have happened, but they have no real consequence, as 1/inf = 0.0, appropriately.
    if specific_neurons is None:
      # precompute separate ones
      self.normalisation_fisher_all = 2. * np.pi * scsp.i0(self.neurons_sigma)
      self.normalisation_gauss_all = np.sqrt(self.neurons_sigma) / (
          np.sqrt(2 * np.pi))

      self.normalisation = np.prod(self.normalisation_fisher_all, axis=-1)
    else:
      self.normalisation_fisher_all[specific_neurons] = 2. * np.pi * scsp.i0(
          self.neurons_sigma[specific_neurons])
      self.normalisation_gauss_all[specific_neurons] = np.sqrt(
          self.neurons_sigma[specific_neurons]) / (np.sqrt(2 * np.pi))
      self.normalisation[specific_neurons] = np.prod(
          self.normalisation_fisher_all[specific_neurons], axis=-1)
  def compute_fisher_information_theoretical(self, sigma=None):
    '''
            Compute the theoretical, large-N limit estimate of the Fisher Information.
            This assumes a diagonal covariance matrix, which is wrong for the complete model.
        '''

    assert self.R <= 2, "Not implemented for R>2"

    if self.population_code_type == 'conjunctive':
      rho = 1. / (4 * np.pi**2 / (self.M))
      # rho = 1./(2*np.pi/(self.M))
    elif self.population_code_type == 'feature':
      # M/2 neurons per 2pi dimension.
      print "This looks wrong, doesn't fit at all"
      rho = 1. / (np.pi**2. / self.M**2.)
    else:
      print 'Fisher information not defined for population type ' + self.population_code_type
      return 0

    kappa1 = self.rc_scale[0]
    kappa2 = self.rc_scale[1]

    return kappa1**2. * rho * (
        scsp.i0(2 * kappa1) - scsp.iv(2, 2 * kappa1)
    ) * scsp.i0(2 * kappa2) / (
        sigma**2. * 8 * np.pi**2. * scsp.i0(kappa1)**2. * scsp.i0(kappa2)**2.)
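For reference, a minimal standalone numerical check of the kappa_3 = 0 identity quoted in the first docstring above, Z = 4 pi^2 I_0(kappa_1) I_0(kappa_2); the kappa values here are arbitrary illustration choices:

import numpy as np
from scipy import integrate
from scipy.special import i0

kappa1, kappa2 = 1.5, 3.0
Z_closed = 4.0 * np.pi**2 * i0(kappa1) * i0(kappa2)
# integrate the kappa_3 = 0 bivariate von Mises kernel over [0, 2pi)^2
Z_numeric, _ = integrate.dblquad(
    lambda t2, t1: np.exp(kappa1 * np.cos(t1) + kappa2 * np.cos(t2)),
    0.0, 2.0 * np.pi, lambda t1: 0.0, lambda t1: 2.0 * np.pi)
print(Z_closed, Z_numeric)  # the two values should agree to quadrature precision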
Example #3
def kaiser_discrete(alpha):
    N = 29
    M = N - 1
    ns = np.arange(M + 0.1)
    z = 2 * ns / M - 1
    wn = i0(np.pi * alpha * np.sqrt(1 - (z) ** 2)) / i0(np.pi * alpha)
    return wn
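A quick sanity check of kaiser_discrete (a sketch, assuming numpy and scipy.special.i0 are imported as in the snippet): with beta = pi * alpha and 29 points it should reproduce numpy's built-in Kaiser window.

import numpy as np

alpha = 2.5                            # arbitrary example value
wn = kaiser_discrete(alpha)            # the function above
wn_np = np.kaiser(29, np.pi * alpha)   # numpy reference window
print(np.allclose(wn, wn_np))          # expected: True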
Example #4
    def tetmConstants(self, ri, ro, neff, wl, EH, c, idx):
        a = numpy.empty((2, 2))
        n = self.maxIndex(wl)
        u = self.u(ro, neff, wl)
        urp = self.u(ri, neff, wl)

        if neff < n:
            B1 = j0(u)
            B2 = y0(u)
            F1 = j0(urp) / B1
            F2 = y0(urp) / B2
            F3 = -j1(urp) / B1
            F4 = -y1(urp) / B2
            c1 = wl.k0 * ro / u
        else:
            B1 = i0(u)
            B2 = k0(u)
            F1 = i0(urp) / B1
            F2 = k0(urp) / B2
            F3 = i1(urp) / B1
            F4 = -k1(urp) / B2
            c1 = -wl.k0 * ro / u
        c3 = c * c1

        a[0, 0] = F1
        a[0, 1] = F2
        a[1, 0] = F3 * c3
        a[1, 1] = F4 * c3

        return numpy.linalg.solve(a, EH.take(idx))
Example #5
 def test_bessel_i0(self):
   x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
   x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
   try:
     from scipy import special  # pylint: disable=g-import-not-at-top
     self.assertAllClose(special.i0(x_single),
                         self.evaluate(special_math_ops.bessel_i0(x_single)))
     self.assertAllClose(special.i0(x_double),
                         self.evaluate(special_math_ops.bessel_i0(x_double)))
   except ImportError as e:
     tf_logging.warn('Cannot test special functions: %s' % str(e))
Example #6
def window(x, name, a=2, alpha=5):
    if np.abs(x) <= a:
        if name == 'lanczos':
            return np.sinc(x / a)
        if name == 'hann':
            return 0.5 * (1 + np.cos(np.pi * x / a))
        if name == 'kaiser':
            return i0(np.pi * alpha * np.sqrt(1 - (x / a) ** 2)) / i0(np.pi * alpha)
        if name is None or name == 'boxcar':  # compare the window name, not the function itself
            return 1.
    else:
        return 0.
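Usage sketch for the scalar window function above (assuming numpy and scipy.special.i0 are imported as in the snippet); since it accepts one x at a time, evaluate it in a comprehension:

import numpy as np

xs = np.linspace(-3, 3, 13)
lanczos = np.array([window(x, 'lanczos', a=2) for x in xs])
kaiser_ = np.array([window(x, 'kaiser', a=2, alpha=5) for x in xs])
print(lanczos)   # zero outside |x| <= a, sinc taper inside
print(kaiser_)   # zero outside |x| <= a, Kaiser taper inside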
Example #7
File: tlsif.py  Project: cbrunet/fibermodes
 def _tecoeq(self, v0, nu):
     u1r1, u2r1, u2r2, s1, s2, n1sq, n2sq, n3sq = self.__params(v0)
     (f11a, f11b) = ((j0(u1r1), jn(2, u1r1)) if s1 > 0 else
                     (i0(u1r1), -iv(2, u1r1)))
     if s2 > 0:
         f22a, f22b = j0(u2r2), y0(u2r2)
         f2a = jn(2, u2r1) * f22b - yn(2, u2r1) * f22a
         f2b = j0(u2r1) * f22b - y0(u2r1) * f22a
     else:  # a
         f22a, f22b = i0(u2r2), k0(u2r2)
         f2a = kn(2, u2r1) * f22a - iv(2, u2r1) * f22b
         f2b = i0(u2r1) * f22b - k0(u2r1) * f22a
     return f11a * f2a - f11b * f2b
def kaiser_window(xs, halfwidth, alpha):
    """
        Return the Kaiser window function for the values 'xs' when the
            half-width of the window is 'halfwidth' with the
            falloff parameter 'alpha'.  The following values are
            particularly interesting:

            alpha
            -----
            0           Rectangular Window
            5           Similar to Hamming window
            6           Similar to Hanning window
            8.6         Almost identical to the Blackman window 
    """
    win = i0(alpha*Num.sqrt(1.0-(xs/halfwidth)**2.0))/i0(alpha)
    return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0)
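Example use of kaiser_window above (a sketch, assuming Num is an alias for numpy and i0 is scipy.special.i0): alpha = 0 gives a rectangular window inside |x| <= halfwidth, larger alpha tapers it.

import numpy as np

xs = np.linspace(-2.0, 2.0, 9)
# note: values outside the half-width trigger a harmless sqrt RuntimeWarning before being zeroed
w_rect = kaiser_window(xs, halfwidth=1.0, alpha=0.0)    # 1 inside the window, 0 outside
w_taper = kaiser_window(xs, halfwidth=1.0, alpha=8.6)   # Blackman-like taper
print(w_rect)
print(w_taper)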
 def _evaluate(self,R,z,phi=0.,t=0.):
     """
     NAME:
        _evaluate
     PURPOSE:
        evaluate the potential at (R,z)
     INPUT:
        R - Cylindrical Galactocentric radius
        z - vertical height
        phi - azimuth
        t - time
     OUTPUT:
        potential at (R,z)
     HISTORY:
        2012-12-26 - Written - Bovy (IAS)
     """
     if self._new:
         #if R > 6.: return self._kp(R,z)
         if nu.fabs(z) < 10.**-6.:
             y= 0.5*self._alpha*R
             return -nu.pi*R*(special.i0(y)*special.k1(y)-special.i1(y)*special.k0(y))
         kalphamax= 10.
         ks= kalphamax*0.5*(self._glx+1.)
         weights= kalphamax*self._glw
         sqrtp= nu.sqrt(z**2.+(ks+R)**2.)
         sqrtm= nu.sqrt(z**2.+(ks-R)**2.)
         evalInt= nu.arcsin(2.*ks/(sqrtp+sqrtm))*ks*special.k0(self._alpha*ks)
         return -2.*self._alpha*nu.sum(weights*evalInt)
     raise NotImplementedError("Not new=True not implemented for RazorThinExponentialDiskPotential")
 def testValuePR0(self):
     "Test vonMisesKappaConjugate probability by changing R0"
     try:
         from scipy.special import i0, i1
     except ImportError:
         self.skipTest("this test requires the scipy Python module")
     c = 10.0
     no = 1.0
     self.kappa.set_scale(no)
     for i in range(100):
         R0 = uniform(0.0, 10.0)
         self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.m, self.kappa, c, R0)
         ratio = i1(no) / i0(no)
         py = exp(no * R0) / i0(no) ** c
         cpp = self.J.get_probability()
         self.assertAlmostEqual(cpp, py, delta=0.001)
Example #11
def estimate_distribution(hours):
    """
    Estimate the parameters of the von Mises distribution for the data.

    Arguments:
        hours: A NumPy array holding incident times as floats between 0 and 24.

    Returns:
        mu: The distribution's center as a float between -pi and pi.
        kappa: The distribution's measure of concentration as a float.

    More information about the von Mises distribution:
        https://en.wikipedia.org/wiki/Von_Mises_distribution
    """
    theta = hours_to_radians(hours)
    z = np.vectorize(complex)(np.cos(theta), np.sin(theta))
    n = len(z)

    z_mean = np.mean(z)
    mu = np.arctan2(z_mean.imag, z_mean.real)

    r2 = np.mean(z.real)**2 + np.mean(z.imag)**2
    re = np.sqrt(n/(n - 1)*(r2 - 1/n))

    x = np.arange(0, 10, 1e-4)
    y = np.abs(i1(x)/i0(x) - re)
    kappa = x[np.argmin(y)]

    return mu, kappa
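A self-contained sketch of the same mu/kappa estimation on angles in radians (hours_to_radians above is project-specific, so this works with von Mises samples directly; the parameter values are illustrative):

import numpy as np
from scipy.special import i0, i1

theta = np.random.vonmises(0.8, 4.0, size=5000)    # samples with mu = 0.8, kappa = 4.0
z_mean = np.exp(1j * theta).mean()
mu_hat = np.arctan2(z_mean.imag, z_mean.real)

n = len(theta)
r2 = np.abs(z_mean)**2
re = np.sqrt(n / (n - 1) * (r2 - 1 / n))            # bias-corrected resultant length

x = np.arange(1e-4, 10, 1e-4)
kappa_hat = x[np.argmin(np.abs(i1(x) / i0(x) - re))]  # invert A(kappa) = I1/I0 on a grid
print(mu_hat, kappa_hat)                            # roughly (0.8, 4.0)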
Example #12
def mmse_stsa(infile, outfile, noise_sum):
    signal, params = read_signal(infile, WINSIZE)
    nf = len(signal)/(WINSIZE/2) - 1
    sig_out=sp.zeros(len(signal),sp.float32)

    G = sp.ones(WINSIZE)
    prevGamma = G
    alpha = 0.98
    window = sp.hanning(WINSIZE)
    gamma15=spc.gamma(1.5)
    lambdaD = noise_sum / 5.0
    percentage = 0
    for no in xrange(nf):
        p = int(math.floor(1. * no / nf * 100))
        if (p > percentage):
            percentage = p
            print "{}%".format(p),

        y = get_frame(signal, WINSIZE, no)
        Y = sp.fft(y*window)
        Yr = sp.absolute(Y)
        Yp = sp.angle(Y)
        gamma = Yr**2/lambdaD
        xi = alpha * G**2 * prevGamma + (1-alpha)*sp.maximum(gamma-1, 0)
        prevGamma = gamma
        nu = gamma * xi / (1+xi)
        G = (gamma15 * sp.sqrt(nu) / gamma ) * sp.exp(-nu/2) * ((1+nu)*spc.i0(nu/2)+nu*spc.i1(nu/2))
        idx = sp.isnan(G) + sp.isinf(G)
        G[idx] = xi[idx] / (xi[idx] + 1)
        Yr = G * Yr
        Y = Yr * sp.exp(Yp*1j)
        y_o = sp.real(sp.ifft(Y))
        add_signal(sig_out, y_o, WINSIZE, no)
    
    write_signal(outfile, params, sig_out)
Example #13
def pdf_vn(x, b, loc):
    """pdf of von mises distribution

    has fixed scale=1, otherwise same as scipy.stats.vonmises

    """
    return np.exp(b * np.cos(x - loc)) / (2.0 * np.pi * special.i0(b))
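A quick cross-check of the docstring claim (a sketch, assuming scipy.special is imported as special for pdf_vn): with scale fixed at 1 it should match scipy.stats.vonmises.

import numpy as np
from scipy import stats

x = np.linspace(-np.pi, np.pi, 7)
print(np.allclose(pdf_vn(x, 2.0, 0.3), stats.vonmises.pdf(x, 2.0, loc=0.3)))  # expected: True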
Example #14
def vonMises_fn(phi, k, mu):
    """
    The vonMises function generates a weighting for different
    cyclic quantities phi from a vonMises distribution centered
    around mu with a kernel width k.
    """
    return (1/(2*np.pi*ss.i0(k))) * np.e**(k*np.cos(2*(phi-mu)))
 def testValueEc(self):
     "Test vonMisesKappaConjugate energy by changing c"
     try:
         from scipy.special import i0, i1
     except ImportError:
         self.skipTest("this test requires the scipy Python module")
     R0 = 1
     no = 1.0
     self.kappa.set_scale(no)
     for i in range(100):
         c = uniform(1.0, 100)
         self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.m, self.kappa, c, R0)
         ratio = i1(no) / i0(no)
         py = -no * R0 + c * log(i0(no))
         cpp = self.J.evaluate(False)
         self.assertAlmostEqual(cpp, py, delta=0.001)
Example #16
def circularKDE(parameters, kdeStep=np.pi/16.):
    """
    Create a probability distribution function for radial-type data,
    e.g. bearings. Returns the grid on which the PDF is defined, the
    PDF itself and the corresponding CDF.

    By default, the grid is specified on [0,2\*pi)

    :param parameters: :class:`numpy.ndarray` of parameter values.
    :param float kdeStep: Increment of the ordinate at which the
                          distribution will be estimated.

    :returns: :class:`numpy.ndarray` of the grid, the PDF and the CDF.
    
    """
    bw = KPDF.UPDFOptimumBandwidth(parameters)
    grid = np.arange(0, 2 * np.pi +kdeStep, kdeStep)
    pdf = np.zeros(len(grid), 'float')  # start from zero, since kernels are accumulated below
    chi = 1./(2 * np.pi * i0(bw))
    for k in parameters:
        kH = chi * np.exp(bw * np.cos(grid-k))
        pdf += kH/kH.sum()

    pdf = pdf/len(pdf)
    cy = stats.cdf(grid, pdf)

    return grid, pdf, cy
Example #17
File: gauss.py  Project: kelvich/wavepack
 def momentum(self,k1,t1,k2,t2):
     #qq = lambda phi: k1**2 + k2**2 - 2*k1*k2*(cos(t2-t1) - 2*sin(t2)*sin(t1)*sin(phi)**2)
     #poten = lambda phi: exp( -1*self.a**2*qq(phi/2.)/4. )
     #return self.A*self.a**3/(8*pi**1.5)*quad(poten,0,2*pi)[0]
     arg1 = -1*(k1**2 + k2**2 - 2*k1*k2*cos(t2)*cos(t1))*self.a/4.0
     arg2 = self.a**2*k1*k2*sin(t1)*sin(t2)
     return self.A*self.a**3/4*pi**0.5*exp( arg1 )*i0( arg2 )
Example #18
File: tlsif.py  Project: cbrunet/fibermodes
 def _tmcoeq(self, v0, nu):
     u1r1, u2r1, u2r2, s1, s2, n1sq, n2sq, n3sq = self.__params(v0)
     if s1 == 0:  # e
         f11a, f11b = 2, 1
     elif s1 > 0:  # a, b, d
         f11a, f11b = j0(u1r1) * u1r1, j1(u1r1)
     else:  # c
         f11a, f11b = i0(u1r1) * u1r1, i1(u1r1)
     if s2 > 0:
         f22a, f22b = j0(u2r2), y0(u2r2)
         f2a = j1(u2r1) * f22b - y1(u2r1) * f22a
         f2b = j0(u2r1) * f22b - y0(u2r1) * f22a
     else:  # a
         f22a, f22b = i0(u2r2), k0(u2r2)
         f2a = i1(u2r1) * f22b + k1(u2r1) * f22a
         f2b = i0(u2r1) * f22b - k0(u2r1) * f22a
     return f11a * n2sq * f2a - f11b * n1sq * f2b * u2r1
Example #19
File: utils.py  Project: Azhag/Gitsby
def kappa_to_stddev(kappa):
    '''
        Convert kappa to the standard deviation of the corresponding wrapped Gaussian,
        using the circular standard deviation:

        std = sqrt(-2 log(I_1(kappa)/I_0(kappa)))
    '''
    # return 1.0 - scsp.i1(kappa)/scsp.i0(kappa)
    return np.sqrt(-2.*np.log(scsp.i1(kappa)/scsp.i0(kappa)))
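A small consistency check (a sketch, assuming scipy.special is available as scsp for the function above): the returned value matches the circular standard deviation sqrt(-2 ln R) estimated from von Mises samples with that kappa.

import numpy as np

kappa = 3.0
theta = np.random.vonmises(0.0, kappa, size=200000)
R_hat = np.abs(np.exp(1j * theta).mean())                      # empirical mean resultant length
print(np.sqrt(-2.0 * np.log(R_hat)), kappa_to_stddev(kappa))   # the two values should be close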
Example #20
def rotation_velocity(pos):
  rho = (pos[0]**2 + pos[1]**2)**0.5
  phi = np.arctan2(pos[1], pos[0])
  y = rho/(2*Rd)
  sigma0 = M_dm / (2*pi*Rd**2)
  speed = (4*pi*G*sigma0*y**2*(i0(y)*k0(y) - i1(y)*k1(y)) +
           (G*M_dm*rho)/(rho+a_dm)**2 + (G*M_bulge*rho)/(rho+a_bulge)**2)**0.5
  return (-speed*sin(phi), speed*cos(phi), 0)
def vonmisespdf(x, mu, K):
  '''
        Von Mises PDF (switch to Normal if high kappa)
    '''
  if K > 700.:
    return np.sqrt(K)/(np.sqrt(2*np.pi))*np.exp(-0.5*(x - mu)**2.*K)
  else:
    return np.exp(K*np.cos(x-mu)) / (2.*np.pi * spsp.i0(K))
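A brief check of the high-kappa switch above (a sketch, recomputing both branches directly; spsp is scipy.special): around the K = 700 threshold the Gaussian branch and the exact von Mises expression are nearly indistinguishable near the mode.

import numpy as np
import scipy.special as spsp

K = 700.0
x = np.linspace(-0.2, 0.2, 401)
exact = np.exp(K * np.cos(x)) / (2. * np.pi * spsp.i0(K))          # von Mises branch
gauss = np.sqrt(K) / np.sqrt(2 * np.pi) * np.exp(-0.5 * x**2 * K)  # Gaussian branch
print(np.max(np.abs(exact - gauss)) / exact.max())                 # small relative difference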
 def testValueEKappa(self):
     "Test vonMisesKappaConjugate energy by changing kappa"
     try:
         from scipy.special import i0,i1
     except ImportError:
         self.skipTest("this test requires the scipy Python module")
     c=10
     R0=1
     self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.kappa,c,R0)
     self.m.add_restraint(self.J)
     for i in xrange(100):
         no=uniform(0.1,100)
         self.kappa.set_scale(no)
         ratio=i1(no)/i0(no)
         py=-no*R0 + c*log(i0(no))
         cpp=self.J.evaluate(None)
         self.assertAlmostEqual(cpp,py,delta=0.001)
 def testDerivativeKappa(self):
     "Test vonMisesKappaConjugate derivative by changing kappa"
     try:
         from scipy.special import i0,i1
     except ImportError:
         self.skipTest("this test requires the scipy Python module")
     c=10
     R0=1
     self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.kappa,c,R0)
     self.m.add_restraint(self.J)
     for i in xrange(100):
         no=uniform(0.1,100)
         self.kappa.set_scale(no)
         self.m.evaluate(self.DA)
         ratio=i1(no)/i0(no)
         self.assertAlmostEqual(self.kappa.get_scale_derivative(),
                 -R0 + c*i1(no)/i0(no), delta=0.001)
Example #24
def kaiser(M,beta,sym=1):
    """Return a Kaiser window of length M with shape parameter beta.

    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1,'d')
    odd = M % 2
    if not sym and not odd:
        M = M+1
    n = arange(0,M)
    alpha = (M-1)/2.0
    w = special.i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/special.i0(beta)
    if not sym and not odd:
        w = w[:-1]
    return w
 def testValuePc(self):
     "test probability by changing c"
     try:
         from scipy.special import i0,i1
     except ImportError:
         self.skipTest("this test requires the scipy Python module")
     R0=1.0
     no=1.0
     self.kappa.set_scale(no)
     for i in xrange(100):
         c=uniform(2.0,75)
         self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.kappa,c,R0)
         self.m.add_restraint(self.J)
         ratio=i1(no)/i0(no)
         py=exp(no*R0)/i0(no)**c
         cpp=self.J.get_probability()
         self.assertAlmostEqual(cpp,py,delta=0.001)
         self.m.remove_restraint(self.J)
Example #26
 def gradient(self,phases,log10_ens=3,free=False):
     e,width,loc = self._make_p(log10_ens)
     my_i0 = i0(1./width)
     my_i1 = i1(1./width)
     z = TWOPI*(phases-loc)
     cz = np.cos(z)
     sz = np.sin(z)
     f = (np.exp(cz)/width)/my_i0
     return np.asarray([-cz/width**2*f,TWOPI*(sz/width+my_i1/my_i0)*f])
 def _margdistphase_loglr(self, mf_snr, opt_snr):
     """Returns the log likelihood ratio marginalized over distance and
     phase.
     """
     logl = numpy.log(special.i0(mf_snr))
     logl_marg = logl/self._dist_array
     opt_snr_marg = opt_snr/self._dist_array**2
     return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                              b=self._deltad*self.dist_prior)
 def population_code_response_2D(theta1,
                                 theta2,
                                 pref_angles,
                                 N=10,
                                 kappa=0.1,
                                 amplitude=1.0):
     return amplitude * np.exp(kappa * np.cos(theta1 - pref_angles[:, 0]) +
                               kappa * np.cos(theta2 - pref_angles[:, 1])
                               ) / (4. * np.pi**2. * scsp.i0(kappa)**2.)
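Usage sketch (an assumption about the layout: pref_angles has one row per neuron with the two preferred angles as columns, and scsp is scipy.special):

import numpy as np

angles = np.linspace(-np.pi, np.pi, 10, endpoint=False)
pref_angles = np.array([(a1, a2) for a1 in angles for a2 in angles])   # 100 neurons on a grid
resp = population_code_response_2D(0.3, -1.2, pref_angles, kappa=0.5)
print(resp.shape)    # (100,) -- one response per neuron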
Example #29
    def _run(self, rzlist, t):

        r = rzlist[0]
        z = rzlist[1]

        temperature = self.T0 + (self.TL - self.T0) * z / self.L + \
            (self.g0 / (2 * self.k)) * z * (self.L - z)
        sum = 0
        for n in xrange(0, self.Nsum):
            nodd = float(2 * n + 1)
            lam = nodd * np.pi / self.L
            iratio = (2. / np.pi) * i0(lam * r) / i0(lam * self.b)
            sum += (2 * self.Tb - self.T0) * iratio * np.sin(lam * z) / nodd
            sum += self.TL * iratio * (-1)**nodd * np.sin(lam * z) / nodd
            sum += -(2 * self.g0 * self.L**2 / (np.pi**2 * self.k)) * np.sin(lam * z) / nodd**2
            temperature += sum

        return ExactSolution([r, z, temperature],
                names=['position_r', 'position_z', 'temperature'],
                jumps=[]
                )
Example #30
def Gamma(nw_rad, lay_ox, L_d, L_tf, eps_1, eps_2, eps_3): 
    fact1 = (nw_rad + lay_ox)/L_d
    fact2 = nw_rad/L_tf
    fact3 = fact1**(-1)
    fact4 = (nw_rad + lay_ox)/nw_rad 
    num    = eps_1*k0(fact1)*(L_d/L_tf)*i1(fact2)
    denom1 = k0(fact1)*fact3 
    denom2 = log(fact4)*k1(fact1)*(eps_3/eps_2)
    denom3 = (denom1 + denom2)*eps_1*fact2*i1(fact2) 
    denom  = denom3 + eps_3*k1(fact1)*i0(fact2) 
    gamma = num/denom 
    return gamma
Example #31
# parameter to define the user-defined von Mises on the SEMI-CIRCLE 
ll = 2.0
# parameter to define the stats von Mises on the SEMI-CIRCLE 
scal = 0.5

#%% plot some modified Bessel functions: 
kappp = np.array(np.arange(1e-3,12,4))
mu1 = np.pi/4
l_lim = -np.pi/2 - mu1
u_lim = np.pi/2 - mu1
t_ = np.linspace( l_lim, u_lim, N )
fig, ax = plt.subplots(1, 1, figsize=(9,3))
for kap in kappp:
    # check the I0 for shifting: 
    I0a = integrate.quad(lambda x: (np.exp(kap*np.cos(2*x))), l_lim, u_lim) - np.pi*special.i0(kap)
    I0c = integrate.quad(lambda x: (np.exp(kap*np.cos(2*x))), -np.pi/2, np.pi/2) - np.pi*special.i0(kap)
    I0b = integrate.quad(lambda x: (np.exp(kap*np.cos(2*(x-mu1)))), -np.pi/2, np.pi/2) - np.pi*special.i0(kap)
    print('I0a, I0b, I0c: ',I0a[0], I0b[0], I0c[0])
    # check the I1 for shifting: 
    I1a = integrate.quad(lambda x: (np.exp(kap*np.cos(2*x)))*(np.cos(2*x)), l_lim, u_lim) - np.pi*special.i1(kap)
    I1c = integrate.quad(lambda x: (np.exp(kap*np.cos(2*x)))*(np.cos(2*x)), -np.pi/2, np.pi/2) - np.pi*special.i1(kap)
    I1b = integrate.quad(lambda x: (np.exp(kap*np.cos(2*(x-mu1))))*(np.cos(2*(x-mu1))), -np.pi/2, np.pi/2) - np.pi*special.i1(kap)
    I1d = integrate.quad(lambda x: (np.exp(kap*np.cos(2*(x-mu1))))*(np.cos(2*(x-mu1))), l_lim, u_lim) - np.pi*special.i1(kap)
    print('I1a, I1c, I1b, I1d: ',I1a[0], I1c[0], I1b[0], I1d[0])
    # check the integral of the VM: 
    VM = np.exp(kap*np.cos(2*(t_-mu1)))/(np.pi*i0(kap))
    Ivm1a = integrate.quad(lambda x: (np.exp(kap*np.cos(2*(x-mu1))))/(np.pi*i0(kap)), l_lim, u_lim) 
    Ivm2a = integrate.quad(lambda x: (np.exp(kap*np.cos(2*x)))/(np.pi*i0(kap)), l_lim, u_lim) 
    Ivm1b = integrate.quad(lambda x: (np.exp(kap*np.cos(2*(x-mu1))))/(np.pi*i0(kap)), -np.pi/2, np.pi/2) 
    Ivm2b = integrate.quad(lambda x: (np.exp(kap*np.cos(2*x)))/(np.pi*i0(kap)), -np.pi/2, np.pi/2) 
Example #32
print(zm)

## p1, Dp1, p2, Dp2, p2star, Dp2star

p1_iterable = ((par['alpha_m'] * k1(par['alpha_m'] * locs['rad'][i]) /
                k0(par['alpha_m'] * locs['rad'][i])) for i in range(n))
p1 = numpy.fromiter(p1_iterable, float)

Dp1 = p1 * numpy.identity(n)

## in p2: should check if delta means delta_h or delta_m
## in confirm.nb it is delta_h = k_h * a_h * alpha_h (patch dependent)

p2_iterable = ((locs['delta'])[i] * i1(locs['alpha'][i] * locs['rad'][i]) /
               (par['am'] * par['km'] * i0(locs['alpha'][i] * locs['rad'][i]))
               for i in range(n))
p2 = numpy.fromiter(p2_iterable, float)

Dp2 = p2 * numpy.identity(n)

p2star_iterable = (
    par['km'] * par['alpha_m'] * par['am'] *
    i1(par['alpha_m'] * locs['rad'][i]) /
    (par['am'] * par['km'] * i0(par['alpha_m'] * locs['rad'][i]))
    for i in range(n))
p2star = numpy.fromiter(p2star_iterable, float)

Dp2star = p2star * numpy.identity(n)

## S matrix
Example #33
 def __call__(self,phases,log10_ens=3):
     e,width,loc = self._make_p(log10_ens)
     z = TWOPI*(phases-loc)
     return np.exp(np.cos(z)/width)/i0(1./width)
Example #34
 def fun_mle(x):
     #FIX: square or square-root(z2) was missing
     #val = z2 - ((i1(x) / i0(x)))**2 #
     val = z - i1(x) / i0(x)
     #print val
     return val
Example #35
def plot(Phi, myinit, prob, ana):
    # Parse myinit
    gdata, Sdata, Kdata = myinit

    # Parse gdata
    x1f, x1c, x2f, x2c = gdata
    nx2, nx1 = len(x2c), len(x1c)

    # Plot Phi
    fig = plt.figure(facecolor='white')
    ax1 = fig.add_subplot(111)

    X, Y = np.meshgrid(x1f, x2f, indexing='xy')

    if ana:
        XC, YC = np.meshgrid(x1c, x2c, indexing='xy')
        X1C, X2C = np.sqrt(XC**2. + YC**2.), np.arctan2(YC, XC)
        X2C[X2C < 0] += 2. * np.pi

        if prob == 'exp':
            Rd = 0.5
            sig0 = 1.
            y = X1C / (2. * Rd)
            Phiana = -np.pi * G * sig0 * X1C * (i0(y) * kn(1, y) -
                                                i1(y) * kn(0, y))
        elif prob == 'constant':
            sig0 = 2.e-3
            v_max = x1c / np.max(x1c)
            u_min = np.min(x1c) / x1c
            grana = 4 * G * sig0 * ((ellipe(v_max) - ellipk(v_max)) / v_max +
                                    ellipk(u_min) - ellipe(u_min))

        elif prob == 'mestel':
            v0 = 1.
            eps = np.max(x1f)
            Phiana = v0**2. * (np.log(X1C / eps) + np.log(0.5))

        elif prob == 'kuzmin':
            a = 1.
            M = 1.
            Phiana = -G * M / np.sqrt(X1C**2. + a**2.)

        elif prob == 'cylinders':
            sig = 0.1
            Rk = lambda rk, pk: np.sqrt(X1C**2. + rk**2. - 2. * X1C * rk * np.
                                        cos(X2C - pk))
            yk = lambda rk, pk: Rk(rk, pk) / (2. * sig)
            Phik = lambda rk, pk: -0.5 * (G / sig**2.) * Rk(rk, pk) * (i0(
                yk(rk, pk)) * kn(1, yk(rk, pk)) - i1(yk(rk, pk)) * kn(
                    0, yk(rk, pk)))

            Phiana = 2. * Phik(3., 1e-3) + 0.5 * Phik(3., np.pi + 1e-3) + Phik(
                2.0, 0.75 * np.pi)

        else:
            print(
                '[plot]: Analytic solution not provided for prob: %s, exiting...'
                % (prob))
            quit()

        print('[plot]: Plotting analytical solution for %s prob' % (prob))
        # Compute RMS error
        rms = np.sqrt(np.sum((Phiana - Phi)**2.) / (float(Phiana.size)))

        # Plot analytic result
        plt.figure(facecolor='white')
        plt.plot(x1c, Phi[nx2 / 2, :], 'b.', label='$\Phi_G$ (numerical)')
        plt.plot(x1c, Phiana[nx2 / 2, :], 'r--', label='$\Phi_G$ (analytical)')
        plt.xlabel('R [code units]')
        plt.ylabel('$\Phi_G$')
        plt.title('RMS error: %3.2e' % (rms))
        plt.legend(loc=4)

    im = ax1.pcolorfast(X, Y, Phi, cmap='magma')
    ax1.set_aspect('equal')
    ax1.set_xlabel('x [code units]')
    ax1.set_ylabel('y [code units]')
    cbar = fig.colorbar(im, label='$\Phi_G$')

    # Plot data
    data = Sdata
    '''
    fig1 = plt.figure(facecolor='white')
    ax2 = fig1.add_subplot(111) 
    im  = ax2.pcolorfast(X, Y, data,cmap='magma') 
    ax2.set_aspect('equal') 
    ax2.set_xlabel('x [code units]')
    ax2.set_ylabel('y [code units]') 
    cbar = fig1.colorbar(im,label='$\Sigma$')
    '''
    plt.show()

    return
Example #36
 def EqF6(self,x):
   """
     Relation between rotation and Gaussian broadening kernels by least-squares fitting
   """
   result = 1./sqrt(2.)-(12.*(self["linLimb"]+2.*self["quadLimb"])*exp(-x**2))/(x**2*(-6.+2.*self["linLimb"]+self["quadLimb"])) \
          + (6.*exp(-x**2/2.))/(x**3*(-6.+2.*self["linLimb"]+self["quadLimb"])) * ( -2.*x**3*(-1.+self["linLimb"]+self["quadLimb"])*i0(x**2/2.) \
          + 2.*x*(-2.*self["quadLimb"]+x**2*(-1.+self["linLimb"]+self["quadLimb"])) * i1(x**2/2.)
          + sqrt(pi)*exp(x**2/2.)*(self["linLimb"]+2.*self["quadLimb"])*erf(x) )
   return result
Example #37
 def _margtimephase_loglr(self, mf_snr, opt_snr):
     """Returns the log likelihood ratio marginalized over time and phase.
     """
     return special.logsumexp(numpy.log(special.i0(mf_snr)),
                              b=self._deltat) - 0.5 * opt_snr
Example #38
 def fun_mle(x):
     val = z - i1(x) / i0(x)
     return val
Example #39
def kirchhoff_roughness(dat, picknum, freq, filt_n=101, eps=3.15):
    """
    Roughness by Kirchhoff Theory
    Christianson et al. (2016), equation C2

    Parameters
    ----------
    freq:   float
        antenna frequency
    filt_n: int; optional
        number of traces included in the median filter
    eps:    float; optional
        relative permittivity of ice
    """

    if 'interp' not in vars(dat.flags):
        raise KeyError('Do interpolation before roughness calculation.')

    # calculate the speed and wavelength
    eps0 = 8.8541878128e-12  # vacuum permittivity
    mu0 = 1.25663706212e-6  # vacuum permeability
    u = 1. / np.sqrt(eps * eps0 * mu0)  # speed of light in ice
    lam = u / freq  # wavelength m

    # get a pick depth
    if 'z' in vars(dat.picks):
        Z = dat.picks.z
    else:
        print('Warning: setting pick depth for constant velocity in ice.')
        Z = dat.picks.time * u / 2 / 1e6

    # Find window size based on the width of the first Fresnel zone
    D1 = np.sqrt(2. * lam *
                 (np.nanmean(Z) / np.sqrt(eps)))  # Width of Fresnel zone
    dx = dat.trace_int[0]  # m spacing between traces
    N = int(round(D1 / (2. * dx)))  # number of traces in the Fresnel window

    # -----------------------------------------------------------------------------

    # Define the bed geometry
    bed_raw = dat.elev - Z[picknum]
    bed_filt = medfilt(bed_raw, filt_n)

    # RMS bed roughness; Christianson et al. (2016) equation C2
    ED1 = np.nan * np.empty((len(bed_filt), ))
    for n in range(N, len(bed_filt) - N + 1):
        b = bed_filt[n - N:n + N].copy()
        b = b[np.where(~np.isnan(b))]
        if len(b) <= 1:
            ED1[n] = np.nan
        else:
            b_ = detrend(b)
            b_sum = 0.
            for i in range(len(b)):
                b_sum += (b_[i])**2.
            ED1[n] = np.sqrt((1 / (len(b) - 1.)) * b_sum)

    # Find the power reduction by Kirchhoff theory
    # Christianson et al. (2016), equation C1

    g = 4. * np.pi * ED1 / lam
    b = (i0((g**2.) / 2.))**2.
    pn = np.exp(-(g**2.)) * b

    return ED1, pn
Example #40
def w(r, z, dp):
    #return i0(k * r) + k0(k * r) - ((1 + pow(m, 2)) * dp / M) - 1
    return c1(z, dp) * i0(k * r) + c2(z, dp) * k0(k * r) - (
        (1 + pow(m, 2)) * dp / M) - 1
Example #41
def c1(z, dp):
    term_1 = c - (k0(k * r1) - k0(k * r2(z))) * c2(z, dp)
    term_2 = i0(k * r1) - i0(k * r2(z))
    return term_1 / term_2
Example #42
def von_mises_cdf_normalapprox(k, x):
    b = np.sqrt(2/np.pi)*np.exp(k)/i0(k)
    z = b*np.sin(x/2.)
    return scipy.stats.norm.cdf(z)
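A short comparison (a sketch, assuming scipy is available) of the normal approximation above against scipy.stats.vonmises.cdf for a fairly concentrated distribution:

import numpy as np
import scipy.stats
from scipy.special import i0

k = 50.0
x = np.linspace(-np.pi, np.pi, 9)
print(von_mises_cdf_normalapprox(k, x))
print(scipy.stats.vonmises.cdf(x, k))   # the two rows should agree closely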
Example #43
def kaiser(N, beta, flag='asymmetric', length='full'):
    r'''
    The Kaiser window function

    .. math::
       w[n] = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}} \right)/I_0(\beta)

    with

    .. math::
       \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    Parameters
    ----------
    N: int
        the window length
    beta: float
        Shape parameter, determines trade-off between main-lobe width and
        side lobe level. As beta gets large, the window narrows.
    flag: string, optional
        Possible values

        - *asymmetric*: asymmetric windows are used
          for overlapping transforms (:math:`M=N`)
        - *symmetric*: the window is symmetric (:math:`M=N-1`)
        - *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
          :math:`w[n]^2 + w[n+N/2]^2=1`)

    length: string, optional
        Possible values

        - *full*: the full length window is computed
        - *right*: the right half of the window is computed
        - *left*: the left half of the window is computed
    '''

    # first choose the indexes of points to compute
    if length == 'left':  # left side of window
        t = np.arange(0, N / 2)
    elif length == 'right':  # right side of window
        t = np.arange(N / 2, N)
    else:  # full window by default
        t = np.arange(0, N)

    # if asymmetric window, denominator is N, if symmetric it is N-1
    if flag in ['symmetric', 'mdct']:
        t = t / float(N - 1)
    else:
        t = t / float(N)

    n = np.arange(0, N)
    alpha = (N - 1) / 2.0
    w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha)**2.0)) /
         special.i0(beta))

    # make the window respect MDCT condition
    if flag == 'mdct':
        d = w[:N / 2] + w[N / 2:]
        w[:N / 2] *= 1. / d
        w[N / 2:] *= 1. / d

    # compute window
    return w
Example #44
 def _margphase_loglr(self, mf_snr, opt_snr):
     """Returns the log likelihood ratio marginalized over phase.
     """
     return numpy.log(special.i0(mf_snr)) - 0.5 * opt_snr
Example #45
File: windows.py  Project: jgoppert/scipy
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    beta : float
        Shape parameter, determines trade-off between main-lobe width and
        side lobe level. As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    The Kaiser window is defined as

    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
               \right)/I_0(\beta)

    with

    .. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    The Kaiser was named for Jim Kaiser, who discovered a simple approximation
    to the DPSS window based on Bessel functions.
    The Kaiser window is a very good approximation to the Digital Prolate
    Spheroidal Sequence, or Slepian window, which is the transform which
    maximizes the energy in the main lobe of the window relative to total
    energy.

    The Kaiser can approximate many other windows by varying the beta
    parameter.

    ====  =======================
    beta  Window shape
    ====  =======================
    0     Rectangular
    5     Similar to a Hamming
    6     Similar to a Hann
    8.6   Similar to a Blackman
    ====  =======================

    A beta value of 14 is probably a good starting point. Note that as beta
    gets large, the window narrows, and so the number of samples needs to be
    large enough to sample the increasingly narrow spike, otherwise NaNs will
    get returned.

    Most references to the Kaiser window come from the signal processing
    literature, where it is used as one of many windowing functions for
    smoothing values.  It is also known as an apodization (which means
    "removing the foot", i.e. smoothing discontinuities at the beginning
    and end of the sampled signal) or tapering function.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.kaiser(51, beta=14)
    >>> plt.plot(window)
    >>> plt.title(r"Kaiser window ($\beta$=14)")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")

    """
    # Docstring adapted from NumPy's kaiser function
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    n = np.arange(0, M)
    alpha = (M - 1) / 2.0
    w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha)**2.0)) /
         special.i0(beta))
    if not sym and not odd:
        w = w[:-1]
    return w
def get_vonMises_pdf(mu, kappa):
    return lambda x: np.exp(kappa * np.cos(x - mu)) / (2 * np.pi * i0(kappa))
Example #47
def von_mises_cdf_normalapprox(k,x,C1):
    b = np.sqrt(2/np.pi)*np.exp(k)/i0(k)
    z = b*np.sin(x/2.)
    C = 24*k
    chi = z - z**3/((C-2*z**2-16)/3.-(z**4+7/4.*z**2+167./2)/(C+C1-z**2+3))**2
    return scipy.stats.norm.cdf(z)
# Draw samples from the distribution:

mu, kappa = 0.0, 4.0  # mean and dispersion
s = np.random.vonmises(mu, kappa, 1000)

# Display the histogram of the samples, along with
# the probability density function:

import matplotlib.pyplot as plt
from scipy.special import i0
plt.hist(s, 50, normed=True)
x = np.linspace(-np.pi, np.pi, num=51)
y = np.exp(kappa * np.cos(x - mu)) / (2 * np.pi * i0(kappa))
plt.plot(x, y, linewidth=2, color='r')
plt.show()
Example #49
def zeroeth_order_bessel(x):
    return i0(x)
Example #50
def write2dShape(file_name, H, L, sample_file, h, hill_name, AR):
    if (hill_name == "RUSHIL"):
        H = 0.117  #float(sys.argv[2]) # [m]
        # ground shape equation
        n = AR  #1.0/3 n = H/a
        m = n + sqrt(n**2 + 1)
        a = H / n  # [m] hill half length
        zeta = linspace(-a, a, 101)
        X = 0.5 * zeta * (1 + a**2 / (zeta**2 + m**2 * (a**2 - zeta**2)))
        Y = 0.5 * m * sqrt(a**2 - zeta**2) * (1 - a**2 / (zeta**2 + m**2 *
                                                          (a**2 - zeta**2)))
        X = insert(X, 0, -L)
        X = append(X, L)
        Y = insert(Y, 0, 0)
        Y = append(Y, 0)
    elif (hill_name == "MartinezBump2D"):
        A = 3.1926
        H = 200  # [m]
        a = H * AR  # [m]
        X0 = X = linspace(-a, a, 101)  # [m]
        Y0 = Y = -H * 1 / 6.04844 * (sp.j0(A) * sp.i0(A * X / a) -
                                     sp.i0(A) * sp.j0(A * X / a))
        X = insert(X, 0, -L)
        X = append(X, L)
        Y = insert(Y, 0, 0)
        Y = append(Y, 0)
        h = 10

    # opening file
    infile = open(file_name, "r")
    outfile = open(file_name + "_t", "w")
    N = i = 0
    polyLineStart = polyLineEnd = []
    value = 0.0
    # reading first line
    line, N = infile.readline(), N + 1
    while line:
        # finding the "polyLine" lines (expecting 2)
        if line.find("polyLine") > 0:
            outfile.write(line)
            # saving start of polyLine
            polyLineStart.append(N + 1)
            i += 1
            # writing new ground shape
            for x, y in zip(X, Y):
                groundLine = "        (     %12.10f     %12.10f    %12.10f    )\n" % (
                    x, y, value)
                outfile.write(groundLine)
            value = 0.1
            # finding end of polyline
            farFromTheEnd = 1
            while farFromTheEnd:
                if line.find("(") < 0 and line.find(")") > 0:
                    polyLineEnd.append(N - 1)
                    farFromTheEnd = 0
                    outfile.write(line)
                line, N = infile.readline(), N + 1
        else:
            outfile.write(line)
            # reading new line
            line, N = infile.readline(), N + 1
    # close files
    infile.close()
    outfile.close()
    # copying original to new and viceversa
    subprocess.call("cp -r " + file_name + " " + file_name + "_temp",
                    shell=True)
    subprocess.call("cp -r " + file_name + "_t " + file_name, shell=True)

    # Write sample file
    # changing line so that it ends at +/- 1000 m
    X0 = insert(X0, 0, -1000)
    X0 = append(X0, 1000)
    Y0 = Y
    # interpolating for a refined line
    xSample = linspace(-1000, 1000, 2000)
    ySample = interp(xSample, X0, Y0)
    # opening file
    infile = open(sample_file, "r")
    outfile = open(sample_file + "_t", "w")
    N = i = 0
    value = 0.0
    # reading first line
    line = infile.readline()
    while line:
        # finding the "nonuniform" line
        if line.find("points") > 0 and line.find("//") < 0:
            outfile.write(line)
            # writing new sample line shape
            for x, y in zip(xSample, ySample):
                sampleLine = "		(	 %12.10f	 %12.10f	%12.10f	)\n" % (x, y + h,
                                                                    value)
                outfile.write(sampleLine)
            line = infile.readline()
        else:
            outfile.write(line)
            # reading new line
            line = infile.readline()
    # close files
    infile.close()
    outfile.close()
    # copying original to new and viceversa
    subprocess.call("cp -r " + sample_file + " " + sample_file + "_temp",
                    shell=True)
    subprocess.call("cp -r " + sample_file + "_t " + sample_file, shell=True)
def limit(temp, freq, tc, d=0, bcs=BCS):
    """
    Calculate the approximate complex conductivity to normal conductivity
    ratio in the limit hf << ∆ and kB T << ∆ given some temperature, frequency
    and transition temperature.
    Parameters
    ----------
    temp : float, iterable of size N
        Temperature in units of Kelvin.
    freq : float, iterable of size N
        Frequency in units of Hz.
    tc: float
        The transition temperature in units of Kelvin.
    d: float (optional)
        Ratio of the imaginary gap to the real gap energy at zero temperature.
    bcs: float (optional)
        BCS constant that relates the gap to the transition temperature.
        ∆ = bcs * kB * Tc. The default is superconductivity.utils.BCS.
    Returns
    -------
    sigma : numpy.ndarray, dtype=numpy.complex128
        The complex conductivity at temp and freq.
    Notes
    -----
    Extension of Mattis-Bardeen theory to a complex gap parameter covered in
        Noguchi T. et al. Physics Proc., 36, 2012.
        Noguchi T. et al. IEEE Trans. Appl. SuperCon., 28, 4, 2018.
    The real part of the gap is assumed to follow the BCS temperature
        dependence expanded at low temperatures. See equation 2.53 in
        Gao J. 2008. CalTech. PhD dissertation.
    No temperature dependence is assumed for the complex portion of the gap
        parameter.
    """
    # coerce inputs into numpy array
    temp, freq = coerce_arrays(temp, freq)
    assert (temp >= 0).all(), "Temperature must be >= 0."
    # break up gap into real and imaginary parts
    delta1 = delta_bcs(0, tc, bcs=bcs)
    delta2 = d * delta1
    # allocate memory for complex conductivity
    sigma1 = np.zeros(freq.size)
    sigma2 = np.zeros(freq.size)
    # separate out zero temperature
    zero = (temp == 0)
    not_zero = (temp != 0)
    freq0 = freq[zero]
    freq1 = freq[not_zero]
    temp1 = temp[not_zero]
    # define some parameters
    xi = sc.h * freq1 / (2 * sc.k * temp1)
    eta = delta1 / (sc.k * temp1)
    # calculate complex conductivity
    sigma1[zero] = np.pi * delta2 / (sc.h * freq0)
    sigma2[zero] = np.pi * delta1 / (sc.h * freq0)
    sigma1[not_zero] = (4 * delta1 / (sc.h * freq1) * np.exp(-eta) * np.sinh(xi) * sp.k0(xi) +
                        np.pi * delta2 / (sc.h * freq1) *
                        (1 + 2 * delta1 / (sc.k * temp1) * np.exp(-eta) * np.exp(-xi) * sp.i0(xi)))
    sigma2[not_zero] = np.pi * delta1 / (sc.h * freq1) * (1 - np.sqrt(2 * np.pi / eta) * np.exp(-eta) -
                                                          2 * np.exp(-eta) * np.exp(-xi) * sp.i0(xi))
    return combine_sigma(sigma1, sigma2)
Example #52
def f(x):
    # Modified Bessel function of order 0
    return i0(x)
Example #53
 def fun_correction(x):
     val = nobs_ratio * (z - nobs_inv) - i1(x) / i0(x)
     #print val
     return val
Example #54
 def minfunc(kbT, s2s1, hw, D0):
     xi = hw / (2 * kbT)
     return np.abs(np.pi / 4 *
                   ((np.exp(D0 / kbT) - 2 * np.exp(-xi) * i0(xi)) /
                    (np.sinh(xi) * k0(xi))) - s2s1)
Example #55
def gendataNeg(X):
    l = '%6s%23s%23s%23s%23s\n' % ('x', 'I0', 'I1', 'I2', 'I3')
    for i, x in enumerate(X):
        l += '%6.2f%23.15e%23.15e%23.15e%23.15e\n' % (x, sp.i0(x), sp.i1(x),
                                                      sp.iv(2, x), sp.iv(3, x))
    return l
Example #56
def _i0(x, xp):
    """Wrapper for i0 that calls either the CPU or GPU implementation."""
    if xp is np:
        return i0(x)
    else:
        return i0_cupy(x)
Example #57
def _L(P0, P):
    """
    The likelihood of P0 given P, L(P0|P), from Vaillancourt (2006).
    """
    L = _PDF(P, P0) / P / np.sqrt(np.pi / 2.0)
    return L / np.exp(-P**2 / 4.0) / i0(P**2 / 4.0)
Example #58
def lap_transgwflow_cyl(s,
                        rad=None,
                        rpart=None,
                        Spart=None,
                        Tpart=None,
                        Qw=None,
                        Twell=None):
    '''
    The solution of the disk model for transient flow under a pumping condition
    in a confined aquifer in Laplace space.
    The solution assumes concentric disks around the pumping well,
    where each disk has its own transmissivity and storativity value.

    Parameters
    ----------
    s : :class:`numpy.ndarray`
        Array with all Laplace-space-points
        where the function should be evaluated
    rad : :class:`numpy.ndarray`
        Array with all radii where the function should be evaluated
    rpart : :class:`numpy.ndarray`
        Given radii separating the disks as well as starting- and endpoints
    Tpart : :class:`numpy.ndarray`
        Given transmissivity values for each disk
    Spart : :class:`numpy.ndarray`
        Given storativity values for each disk
    Qw : :class:`float`
        Pumping rate at the well
    Twell : :class:`float`, optional
        Transmissivity at the well. Default: ``Tpart[0]``

    Returns
    -------
    lap_transgwflow_cyl : :class:`numpy.ndarray`
        Array with all values in laplace-space

    Example
    -------
    >>> lap_transgwflow_cyl([5,10],[1,2,3],[0,2,10],[1e-3,1e-3],[1e-3,2e-3],-1)
    array([[ -2.71359196e+00,  -1.66671965e-01,  -2.82986917e-02],
           [ -4.58447458e-01,  -1.12056319e-02,  -9.85673855e-04]])
    '''

    # ensure that input is treated as arrays
    s = np.squeeze(s).reshape(-1)
    rad = np.squeeze(rad).reshape(-1)
    rpart = np.squeeze(rpart).reshape(-1)
    Spart = np.squeeze(Spart).reshape(-1)
    Tpart = np.squeeze(Tpart).reshape(-1)

    # get the number of partitions
    parts = len(Tpart)

    # initialize the result
    res = np.zeros(s.shape + rad.shape)

    # set the general pumping-condition
    if Twell is None:
        Twell = Tpart[0]
    Q = Qw / (2.0 * np.pi * Twell)

    # if there is a homogeneous aquifer, compute the result by hand
    if parts == 1:
        # calculate the square-root of the diffusivities
        difsr = np.sqrt(Spart[0] / Tpart[0])

        for si, se in np.ndenumerate(s):
            Cs = np.sqrt(se) * difsr

            # set the pumping-condition at the well
            Qs = Q / se

            # incorporate the boundary-conditions
            if rpart[0] == 0.0:
                Bs = Qs
                if rpart[-1] == np.inf:
                    As = 0.0
                else:
                    As = -Qs * k0(Cs * rpart[-1]) / i0(Cs * rpart[-1])

            else:
                if rpart[-1] == np.inf:
                    As = 0.0
                    Bs = Qs / (Cs * rpart[0] * k1(Cs * rpart[0]))
                else:
                    det = i1(Cs*rpart[0])*k0(Cs*rpart[-1]) \
                        + k1(Cs*rpart[0])*i0(Cs*rpart[-1])
                    As = -Qs / (Cs * rpart[0]) * k0(Cs * rpart[-1]) / det
                    Bs = Qs / (Cs * rpart[0]) * i0(Cs * rpart[-1]) / det

            # calculate the head
            for ri, re in np.ndenumerate(rad):
                if re < rpart[-1]:
                    res[si + ri] = As * i0(Cs * re) + Bs * k0(Cs * re)

    # if there is more than one partition, create an equation system
    else:
        # initialize LHS and RHS for the linear equation system
        # Mb is the banded matrix for the Eq-System
        V = np.zeros(2 * (parts))
        Mb = np.zeros((5, 2 * (parts)))
        # the positions of the diagonals of the matrix set in Mb
        diagpos = [2, 1, 0, -1, -2]
        # set the standard boundary conditions for rwell=0.0 and rinf=np.inf
        Mb[1, 1] = 1.0
        Mb[-2, -2] = 1.0

        # calculate the consecutive fractions of the transmissivities
        Tfrac = Tpart[:-1] / Tpart[1:]

        # calculate the square-root of the diffusivities
        difsr = np.sqrt(Spart / Tpart)

        # calculate a temporal substitution
        tmp = Tfrac * difsr[:-1] / difsr[1:]

        # match the radii to the different disks
        pos = np.searchsorted(rpart, rad) - 1

        # iterate over the laplace-variable
        for si, se in enumerate(s):
            Cs = np.sqrt(se) * difsr

            # set the pumping-condition at the well
            # TODO: implement other pumping conditions
            V[0] = Q / se

            # set the boundary-conditions if needed
            if rpart[0] > 0.0:
                Mb[1, 1] = Cs[0] * rpart[0] * k1(Cs[0] * rpart[0])
                Mb[0, 2] = -Cs[0] * rpart[0] * i1(Cs[0] * rpart[0])
            if rpart[-1] < np.inf:
                Mb[-3, -1] = k0(Cs[-1] * rpart[-1])
                Mb[-2, -2] = i0(Cs[-1] * rpart[-1])

            # generate the equation system as banded matrix
            for i in range(parts - 1):
                Mb[0, 2 * i + 3] = -k0(Cs[i + 1] * rpart[i + 1])
                Mb[1, 2 * i + 2:2 * i + 4] = [
                    -i0(Cs[i + 1] * rpart[i + 1]),
                    k1(Cs[i + 1] * rpart[i + 1])
                ]
                Mb[2, 2 * i + 1:2 * i + 3] = [
                    k0(Cs[i] * rpart[i + 1]), -i1(Cs[i + 1] * rpart[i + 1])
                ]
                Mb[3, 2 * i:2 * i + 2] = [
                    i0(Cs[i] * rpart[i + 1]),
                    -tmp[i] * k1(Cs[i] * rpart[i + 1])
                ]
                Mb[4, 2 * i] = tmp[i] * i1(Cs[i] * rpart[i + 1])

            # generate the coefficient matrix as a sparse matrix
            M = sps.spdiags(Mb, diagpos, 2 * parts, 2 * parts, format="csc")

            # solve the Eq-Sys and ignore errors from the umf-pack
            with warnings.catch_warnings():
                # warnings.simplefilter("ignore")
                warnings.simplefilter("ignore", SLV_WARN)
                X = sps.linalg.spsolve(M, V, use_umfpack=True)

            # to suppress numerical errors, set NAN values to 0
            X[np.logical_not(np.isfinite(X))] = 0.0

            # calculate the head
            res[si, :] = X[2 * pos] * i0(Cs[pos] * rad) + X[2 * pos + 1] * k0(
                Cs[pos] * rad)

        # set problematic values to 0
        # --> the algorithm tends to violate small values,
        #     therefore this approach is suitable
        res[np.logical_not(np.isfinite(res))] = 0.0

    return res
Example #59
def MMSESTSA(signal, fs, IS=0.25, W=1024, NoiseMargin=3, saved_params=None):
    '''
    MMSE-STSA method

    Args:
        signal : one-dimensional input signal
        fs     : sampling frequency
        IS     : initial silence used for initialisation [sec] (default: 0.25)
    '''

    # window length is 25 msec
    #W = np.fix( 0.25 * fs )

    # Shift percentage is 40% (=10msec)
    # Overlap-Add method works good with this value(.4)
    SP = 0.4

    wnd = np.hamming(W)

    # pre-emphasis
    pre_emph = 0
    signal = scipy.signal.lfilter([1 - pre_emph], 1, signal)

    # number of initial silence segments
    NIS = int(np.fix((IS * fs - W) / (SP * W) + 1))

    # This function chops the signal into frames
    y = segment(signal, W, SP, wnd)
    Y = np.fft.fft(y, axis=0)

    # Noisy Speech Phase
    YPhase = np.angle(Y[0:int(np.fix(len(Y) / 2)) + 1, :])

    # Spectrogram
    Y = np.abs(Y[0:int(np.fix(len(Y) / 2)) + 1, :])

    numberOfFrames = Y.shape[1]

    # initial Noise Power Spectrum mean
    N = np.mean(Y[:, 0:NIS].T).T

    # initial Noise Power Spectrum variance
    LambdaD = np.mean((Y[:, 0:NIS].T)**2).T

    # used in smoothing xi (for the Decision-Directed method for estimation of the a priori SNR)
    alpha = 0.99

    # This is a smoothing factor for the noise updating
    NoiseLength = 9
    NoiseCounter = 0

    if saved_params != None:
        NIS = 0
        N = saved_params['N']
        LambdaD = saved_params['LambdaD']
        NoiseCounter = saved_params['NoiseCounter']

    # Initial Gain used in calculation of the new xi
    G = np.ones(N.shape)
    Gamma = G

    # Gamma function at 1.5
    Gamma1p5 = spc.gamma(1.5)
    X = np.zeros(Y.shape)

    for i in range(numberOfFrames):
        Y_i = Y[:, i]

        # If initial silence ignore VAD
        if i < NIS:
            SpeechFlag = 0
            NoiseCounter = 100
        else:
            # Magnitude Spectrum Distance VAD
            NoiseFlag, SpeechFlag, NoiseCounter = vad(Y_i, N, NoiseCounter,
                                                      NoiseMargin)

        # If not Speech Update Noise Parameters
        if SpeechFlag == 0:
            N = (NoiseLength * N + Y_i) / (NoiseLength + 1)
            LambdaD = (NoiseLength * LambdaD + (Y_i**2)) / (1 + NoiseLength)

        # A posteriori SNR
        gammaNew = (Y_i**2) / LambdaD
        # Decision Directed Method for A Priori SNR
        xi = alpha * (G**2) * Gamma + (1 - alpha) * np.maximum(gammaNew - 1, 0)

        Gamma = gammaNew

        # A Function used in Calculation of Gain
        nu = Gamma * xi / (1 + xi)

        # MMSE STSA algo
        G = (Gamma1p5 * np.sqrt(nu) / Gamma) * np.exp(-nu / 2.0) *\
             ((1.0 + nu) * spc.i0(nu / 2.0) + nu * spc.i1(nu / 2.0))
        Indx = np.isnan(G) | np.isinf(G)
        G[Indx] = xi[Indx] / (1 + xi[Indx])

        X[:, i] = G * Y_i

    output = OverlapAdd2(X, YPhase, W, SP * W)
    return output, {'N': N, 'LambdaD': LambdaD, 'NoiseCounter': NoiseCounter}
Example #60
File: _vad.py  Project: hdubey/vad-2
    def activations(self, sig, n_noise_frames=20):
        """
        Returns continuous activations of the voice activity detector.

        Parameters
        ----------

        sig : ndarray
            audio signal
        n_noise_frames : int
            number of frames at start of file to use for initial noise model

        """
        frames = self.stft(sig)
        n_frames = frames.shape[0]

        noise_var_tmp = zeros(self.NFFT // 2 + 1)
        for n in xrange(n_noise_frames):
            frame = frames[n]
            noise_var_tmp = noise_var_tmp + (conj(frame) * frame).real

        noise_var_orig = noise_var_tmp / n_noise_frames
        noise_var_old = noise_var_orig

        G_old = 1
        A_MMSE = zeros((self.NFFT // 2 + 1, n_frames))
        G_MMSE = zeros((self.NFFT // 2 + 1, n_frames))

        cum_Lambda = zeros(n_frames)
        for n in xrange(n_frames):
            frame = frames[n]
            frame_var = (conj(frame) * frame).real

            noise_var = noise_var_orig

            if self.max_est_iter == -1 or n < self.max_est_iter:
                noise_var_prev = noise_var_orig
                for iter_idx in xrange(self.n_iters):
                    gamma = frame_var / noise_var
                    Y_mag = np.abs(frame)

                    if n:
                        xi = (self.alpha *
                              ((A_MMSE[:, n - 1]**2 / noise_var_old) +
                               (1 - self.alpha) * maximum(gamma - 1, 0)))
                    else:
                        xi = (self.alpha +
                              (1 - self.alpha) * maximum(gamma - 1, 0))
                    v = xi * gamma / (1 + xi)
                    bessel_1 = i1(v / 2)
                    bessel_0 = i0(v / 2)
                    g_upd = (sqrt(pi) / 2) * (sqrt(v) / gamma) * np.exp(v/-2) * \
                        ((1 + v) * bessel_0 + v * bessel_1)
                    np.putmask(g_upd, np.logical_not(np.isfinite(g_upd)), 1.)
                    G_MMSE[:, n] = g_upd
                    A_MMSE[:, n] = G_MMSE[:, n] * Y_mag

                    gamma_term = gamma * xi / (1 + xi)
                    gamma_term = minimum(gamma_term, 1e-2)
                    Lambda_mean = (1 / (1 + xi) + exp(gamma_term)).mean()

                    weight = Lambda_mean / (1 + Lambda_mean)
                    if isnan(weight):
                        weight = 1

                    noise_var = \
                        weight * noise_var_orig + (1 - weight) * frame_var

                    diff = np.abs(np.sum(noise_var - noise_var_prev))
                    if diff < self.epsilon:
                        break
                    noise_var_prev = noise_var

            gamma = frame_var / noise_var
            Y_mag = np.abs(frame)

            if n:
                xi = self.alpha * ((A_MMSE[:, n - 1]**2 / noise_var_old) +
                                   (1 - self.alpha) * maximum(gamma - 1, 0))
            else:
                xi = self.alpha + (1 - self.alpha) * maximum(gamma - 1, 0)

            v = (xi * gamma) / (1 + xi)
            bessel_0 = i0(v / 2)
            bessel_1 = i1(v / 2)
            g_upd = (sqrt(pi) / 2) * (sqrt(v) / gamma) * \
                exp(v/-2) * ((1 + v) * bessel_0 + v * bessel_1)
            np.putmask(g_upd, np.logical_not(np.isfinite(g_upd)), 1.)
            G_MMSE[:, n] = g_upd
            A_MMSE[:, n] = G_MMSE[:, n] * Y_mag

            Lambda_mean = (log(1 / (1 + xi)) + gamma * xi / (1 + xi)).mean()

            G = ((self.a01 + self.a11 * G_old) /
                 (self.a00 + self.a10 * G_old) * Lambda_mean)

            cum_Lambda[n] = G

            G_old = G
            noise_var_old = noise_var
        return cum_Lambda