Example #1
def kaiser_bessel(kwidth, grid_mod, over_samp = 1.375):
    # kernel = np.zeros(krad * grid_mod)
    beta = np.pi * np.sqrt((kwidth * kwidth) / (over_samp * over_samp)
                           * (over_samp - 0.5) * (over_samp - 0.5) - 0.8)

    print(beta)

    x = np.linspace(0, kwidth, grid_mod)

    x1 = np.sqrt(1 - (x / kwidth) ** 2)

    y = np.i0(beta * x1)
    y = y / y[0]

    # pl.plot(x, y)
    # pl.show()

    fx = np.arange(192)
    fx = fx - 192/2.0
    fy = np.zeros(192, 'c16')

    deapp = np.sqrt(((np.pi * fx / kwidth)**2 - beta**2).astype('c16'))

    fy = np.sin(deapp)/deapp

    return(x, y, fy)
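A minimal usage sketch (the kwidth and grid_mod values below are assumed, not from the original source): build a width-4 kernel sampled at 64 points and inspect the normalised kernel and its deapodization profile.

x, y, fy = kaiser_bessel(kwidth=4, grid_mod=64)
print(y[0], y[-1])    # the kernel is normalised to 1 at the centre and decays toward the edge
print(fy.shape)       # 192-point deapodization profile, complex because of the sqrt of a negative argument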
Example #2
def mk_kbd_window(alpha, N):
	n = numpy.arange(0., N/2)
	n = (n - N/4)/(N/4)
	n = n * n
	W = numpy.i0(numpy.pi*alpha*numpy.sqrt(1.0 - n))
	W = numpy.cumsum(W)
	W = numpy.sqrt(W / W[-1])
	return W
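A minimal usage sketch (the alpha and N values are assumed): the function returns only the rising half of the window, so the full length-N KBD window comes from mirroring it.

import numpy
W_half = mk_kbd_window(alpha=4.0, N=1024)               # first N/2 samples of the window
W_full = numpy.concatenate((W_half, W_half[::-1]))      # symmetric length-N window
print(W_full.shape)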
Example #3
def tlsAndMBT(params, temps, data, eps = None):
    """Return residual of model, weighted by uncertainties.

    Return value:
    residual -- the weighted or unweighted vector of residuals

    Arguments:
    params -- an lmfit Parameters object containing df, Fd, fRef, alpha, delta0
    temps -- a list/vector of temperatures at which to evaluate the model
    data -- a list/vector of data to compare with model
    eps -- a list/vector of uncertainty values for data

    len(temps) == len(data) == len(eps)"""
    #Unpack parameter values from params
    df = params['df'].value
    Fd = params['Fd'].value
    fRef = params['fRef'].value
    alpha = params['alpha'].value
    delta0 = params['delta0'].value
    tc = params['tc'].value
    dtc = params['dtc'].value
    zn = params['zn'].value

    #Convert fRef to f0
    f0 = fRef-df

    #Calculate model from parameters
    model = -df/(f0+df)+(f0/(f0+df))*(Fd/sc.pi* #TLS contribution
             (np.real(digamma(0.5+sc.h*f0/(1j*2*sc.pi*sc.k*(temps))))
              -np.log(sc.h*f0/(2*sc.pi*sc.k*(temps))))-

             alpha/4.0* #MBD contribution
             (np.sqrt((2*sc.pi*sc.k*temps)/delta0)*
              np.exp(-delta0/(sc.k*temps))+
              2*np.exp(-delta0/(sc.k*temps))*
              np.exp(-sc.h*f0/(2*sc.k*temps))*
              np.i0(sc.h*f0/(2*sc.k*temps)))+

              0.5*zn*(1+np.tanh((temps-tc)/dtc)))


    #Weight the residual if eps is supplied
    if data is not None:
        if eps is not None:
            residual = (model-data)/eps
        else:
            residual = (model-data)

        return residual
    else:
        return model
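A hedged sketch of driving this residual with lmfit (every numerical value below is a placeholder, not taken from the original source); lmfit calls the function as tlsAndMBT(params, temps, data, eps=eps).

import numpy as np
import lmfit

temps = np.linspace(0.05, 1.0, 50)    # made-up temperature sweep
data = np.zeros_like(temps)           # placeholder measurements
eps = np.ones_like(temps)             # placeholder uncertainties

params = lmfit.Parameters()
for name, value in [('df', 1e3), ('Fd', 1e-5), ('fRef', 6e9), ('alpha', 0.5),
                    ('delta0', 3e-23), ('tc', 1.2), ('dtc', 0.05), ('zn', 1e-6)]:
    params.add(name, value=value)

result = lmfit.minimize(tlsAndMBT, params, args=(temps, data), kws={'eps': eps})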
Example #4
def kaiser_bessel2(kwidth, grid_mod, over_samp, imsize=128):
    kw0 = kwidth/over_samp

    kr = kwidth/2.0

    beta = np.pi*np.sqrt((kw0*(over_samp-0.5))**2-0.8)

    # print beta

    kosf = np.floor(0.91/(over_samp*1e-3))
    # print kr
    # print kosf
    # print kosf*kr
    om = np.arange(kosf*kr)/(kosf*kr)
    # print om

    x = np.linspace(0, kr, grid_mod)

    x_bess = np.sqrt(1 - (x / kr) ** 2)
    # print x_bess
    y = np.i0(beta * x_bess)
    y = y / y[0]

    # pl.plot(y)
    # pl.show()

    fx = np.arange(imsize*over_samp)
    
    fx = fx - imsize*over_samp/2
    fx = fx / imsize * 1.5
    # print fx

    sqa = np.sqrt((np.pi*np.pi*kw0*kw0*fx*fx-beta*beta).astype('c16'))
    # print sqa
    fy = np.sin(sqa)/sqa
    fy = abs(fy)
    fy = fy / fy.max()
    # print fy

    # pl.plot(fy)
    # pl.show()

    return (x,y,fx,fy)
Example #5
    def calc_kernel_kb(self, grid_params):

        kw0 = 2.0 * grid_params.krad / grid_params.over_samp
        kr = grid_params.krad

        beta = np.pi * \
            np.sqrt((kw0 * (grid_params.over_samp - 0.5)) ** 2 - 0.8)

        x = np.linspace(0, kr, grid_params.grid_mod)
        print(x)
        x_bess = np.sqrt(1 - (x / kr) ** 2)

        y = np.i0(beta * x_bess)
        y = y / y[0]

        x = np.concatenate((x, np.array([0.0, ])))
        y = np.concatenate((y, np.array([0.0, ])))

        self.kx = x
        self.ky = y

        pl.figure()
        pl.plot(y)
Example #6
def KBDWindow(dataSampleArray, alpha=4.):
    """
    Returns a copy of the dataSampleArray KBD-windowed
    KBD window is defined following pp. 108-109 and pp. 117-118 of
    Bosi & Goldberg, "Introduction to Digital Audio..." book
    """

    N = float(len(dataSampleArray))
    t = arange(int(N/2 + 1))
    input = dataSampleArray.copy()

    # i0 --> 0th modified bessel function
    kaiser = i0(alpha * pi * sqrt(1.0 - (4.0 * t / N - 1.0) ** 2)) / np.i0(np.pi * alpha)

    denominator =  sum(kaiser ** 2)

    numerator = cumsum(kaiser[:-1] ** 2)
    numerator = concatenate((numerator, numerator[::-1]), axis=0)

    window = sqrt(numerator /  denominator)
    out = input * window

    return out
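A hedged usage sketch (the block length and alpha are assumed, and the pylab-style names the function relies on are taken to be imported): windowing one block of samples before a transform.

block = np.random.randn(2048)
windowed = KBDWindow(block, alpha=4.)
print(windowed.shape)    # same length as the input block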
Example #7
    def importanceSampling(self, K):
        # Init variables
        nx = self.sz
        ny = self.sz
        logZ = 0.
        samples = np.zeros( (K, nx, ny) )
        evalPQ = np.zeros( K )
        
        for iX in range(nx):
            for iY in range(ny):
                kappa = 0.
                logZ += np.log( 2 * np.pi * np.i0(kappa) )
                samples[:, iX, iY] = np.random.vonmises(0, kappa, K)
        #print logZ

        for iSample in range(K):
            for iX in range(nx):
                for iY in range(ny):
                    curInd = hlp.ravel_multi_index( (iX, iY), (nx,ny) )
                    if iX > 0:
                        neiInd = hlp.ravel_multi_index( (iX-1, iY), (nx,ny) )
                        evalPQ[iSample] += self.J * np.cos( samples[iSample, iX, iY] - samples[iSample, iX-1, iY] ) 
                    else:
                        neiInd = hlp.ravel_multi_index( (nx-1, iY), (nx,ny) )
                        evalPQ[iSample] += self.J * np.cos( samples[iSample, iX, iY] - samples[iSample, nx-1, iY] ) 
                    if iY > 0:
                        neiInd = hlp.ravel_multi_index( (iX, iY-1), (nx,ny) )
                        evalPQ[iSample] += self.J * np.cos( samples[iSample, iX, iY] - samples[iSample, iX, iY-1] )
                    else:
                        neiInd = hlp.ravel_multi_index( (iX, ny-1), (nx,ny) )
                        evalPQ[iSample] += self.J * np.cos( samples[iSample, iX, iY] - samples[iSample, iX, ny-1] )                        
        
        evalMax = np.max( evalPQ )
        evalPQ = np.exp( evalPQ - evalMax )
        logZ += evalMax - np.log(K) + np.log( np.sum( evalPQ ) )
        return logZ
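Because kappa is fixed at zero here, the proposal is uniform on the circle and every one of the nx*ny sites contributes log(2*pi*I0(0)) = log(2*pi) to logZ; a one-line check of that identity (not part of the original):

import numpy as np
print(float(np.i0(0.0)))    # 1.0, so each site adds exactly log(2*pi)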
Example #8
    def sample(self):
        if self.tr_cond == 'all_gains':
            G = (1.0 / self.stim_dur) * np.random.choice(
                [1.0], size=(self.n_loc, self.batch_size))
            G = np.repeat(G, self.n_in, axis=0).T
            G = np.tile(G, (self.stim_dur, 1, 1))
            G = np.swapaxes(G, 0, 1)
        else:
            G = (0.5 / self.stim_dur) * np.random.choice(
                [1.0], size=(1, self.batch_size))
            G = np.repeat(G, self.n_in * self.n_loc, axis=0).T
            G = np.tile(G, (self.stim_dur, 1, 1))
            G = np.swapaxes(G, 0, 1)

        H = (1.0 / self.resp_dur) * np.ones(
            (self.batch_size, self.resp_dur, self.nneuron))

        # Target presence/absence and stimuli
        C = np.random.choice([0.0, 1.0], size=(self.batch_size, ))
        C1ind = np.where(C == 1.0)[0]  # change

        S1 = np.pi * np.random.rand(self.n_loc, self.batch_size)
        S2 = S1
        S1 = np.repeat(S1, self.n_in, axis=0).T
        S1 = np.tile(S1, (self.stim_dur, 1, 1))
        S1 = np.swapaxes(S1, 0, 1)

        S2[np.random.randint(0, self.n_loc, size=(len(C1ind), )),
           C1ind] = np.pi * np.random.rand(len(C1ind))
        S2 = np.repeat(S2, self.n_in, axis=0).T
        S2 = np.tile(S2, (self.resp_dur, 1, 1))
        S2 = np.swapaxes(S2, 0, 1)

        # Noisy responses
        L1 = G * np.exp(self.kappa * (np.cos(2.0 * (S1 - np.tile(
            self.phi,
            (self.batch_size, self.stim_dur, self.n_loc)))) - 1.0))  # stim 1
        L2 = H * np.exp(self.kappa * (np.cos(2.0 * (S2 - np.tile(
            self.phi,
            (self.batch_size, self.resp_dur, self.n_loc)))) - 1.0))  # stim 2
        Ld = (self.spon_rate / self.delay_dur) * np.ones(
            (self.batch_size, self.delay_dur, self.nneuron))  # delay

        R1 = np.random.poisson(L1)
        R2 = np.random.poisson(L2)
        Rd = np.random.poisson(Ld)

        example_input = np.concatenate((R1, Rd, R2), axis=1)
        example_output = np.repeat(C[:, np.newaxis], self.total_dur, axis=1)
        example_output = np.repeat(example_output[:, :, np.newaxis], 1, axis=2)

        cum_R1 = np.sum(R1, axis=1)
        cum_R2 = np.sum(R2, axis=1)

        mu_x = np.asarray([
            np.arctan2(
                np.dot(cum_R1[:, i * self.n_in:(i + 1) * self.n_in],
                       np.sin(2.0 * self.phi)),
                np.dot(cum_R1[:, i * self.n_in:(i + 1) * self.n_in],
                       np.cos(2.0 * self.phi))) for i in range(self.n_loc)
        ])
        mu_y = np.asarray([
            np.arctan2(
                np.dot(cum_R2[:, i * self.n_in:(i + 1) * self.n_in],
                       np.sin(2.0 * self.phi)),
                np.dot(cum_R2[:, i * self.n_in:(i + 1) * self.n_in],
                       np.cos(2.0 * self.phi))) for i in range(self.n_loc)
        ])

        temp_x = np.asarray([
            np.swapaxes(np.multiply.outer(cum_R1, cum_R1), 1, 2)[i, i, :, :]
            for i in range(self.batch_size)
        ])
        temp_y = np.asarray([
            np.swapaxes(np.multiply.outer(cum_R2, cum_R2), 1, 2)[i, i, :, :]
            for i in range(self.batch_size)
        ])

        kappa_x = np.asarray([
            np.sqrt(
                np.sum(temp_x[:, i * self.n_in:(i + 1) * self.n_in,
                              i * self.n_in:(i + 1) * self.n_in] *
                       np.repeat(np.cos(
                           np.subtract(
                               np.expand_dims(self.phi, axis=1),
                               np.expand_dims(self.phi,
                                              axis=1).T))[np.newaxis, :, :],
                                 self.batch_size,
                                 axis=0),
                       axis=(1, 2))) for i in range(self.n_loc)
        ])
        kappa_y = np.asarray([
            np.sqrt(
                np.sum(temp_y[:, i * self.n_in:(i + 1) * self.n_in,
                              i * self.n_in:(i + 1) * self.n_in] *
                       np.repeat(np.cos(
                           np.subtract(
                               np.expand_dims(self.phi, axis=1),
                               np.expand_dims(self.phi,
                                              axis=1).T))[np.newaxis, :, :],
                                 self.batch_size,
                                 axis=0),
                       axis=(1, 2))) for i in range(self.n_loc)
        ])

        if self.n_loc == 1:
            d = np.i0(kappa_x) * np.i0(kappa_y) / np.i0(
                np.sqrt(kappa_x**2 + kappa_y**2 +
                        2.0 * kappa_x * kappa_y * np.cos(mu_y - mu_x)))
        else:
            d = np.nanmean(np.i0(kappa_x) * np.i0(kappa_y) / np.i0(
                np.sqrt(kappa_x**2 + kappa_y**2 +
                        2.0 * kappa_x * kappa_y * np.cos(mu_y - mu_x))),
                           axis=0)

        P = d / (d + 1.0)
        return example_input, example_output, C, P
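A hedged numerical illustration of the final decision variable (the concentrations below are made up): when the two circular estimates agree, the combined concentration inside the denominator is kappa_x + kappa_y, which makes d, and hence P = d / (d + 1), as small as it can get.

import numpy as np
kx, ky = 5.0, 5.0
d_same = np.i0(kx) * np.i0(ky) / np.i0(np.sqrt(kx**2 + ky**2 + 2*kx*ky*np.cos(0.0)))
d_opposite = np.i0(kx) * np.i0(ky) / np.i0(np.sqrt(kx**2 + ky**2 + 2*kx*ky*np.cos(np.pi)))
print(d_same < d_opposite)    # True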
Example #9
import numpy as np
from pylab import *

x = np.linspace(0, 4, 100)
vals = np.i0(x)

plot(x, vals)
show()
Example #10
         reason="NEP-18 support is not available in NumPy"),
 ),
 pytest.param(
     lambda x: np.imag(x),
     marks=pytest.mark.skipif(
         not IS_NEP18_ACTIVE,
         reason="NEP-18 support is not available in NumPy"),
 ),
 pytest.param(
     lambda x: np.fix(x),
     marks=pytest.mark.skipif(
         not IS_NEP18_ACTIVE,
         reason="NEP-18 support is not available in NumPy"),
 ),
 pytest.param(
     lambda x: np.i0(x.reshape((24, ))),
     marks=pytest.mark.skipif(
         not IS_NEP18_ACTIVE,
         reason="NEP-18 support is not available in NumPy"),
 ),
 pytest.param(
     lambda x: np.sinc(x),
     marks=pytest.mark.skipif(
         not IS_NEP18_ACTIVE,
         reason="NEP-18 support is not available in NumPy"),
 ),
 pytest.param(
     lambda x: np.nan_to_num(x),
     marks=pytest.mark.skipif(
         not IS_NEP18_ACTIVE,
         reason="NEP-18 support is not available in NumPy"),
Example #11
def vonmises(x, amp, cen, kappa):
    #    "1-d vonmises"
    return (amp / (np.pi * 2 * np.i0(kappa))) * np.exp(kappa * np.cos(x - cen))
# print('kappa estimate with formula = ',k_est)
# use fsolve, to estimate 'kappa', when 'mu' has been estimated through
# equation (2.4) of Banerjee(2005):
#r_min = optimize.fsolve(myF_kappa, [0.0], args=r_bar)
## r_min = optimize.fmin_bfgs(myF, (0,0))
#print(r_min)
#
## use fsolve, to estimate 'kappa' and 'mu':
#initParams = [ 1, 1 ]
#results = optimize.minimize(vmf_log2, initParams, method='nelder-mead')
#print(results.x)

# ---------------------------------------------------------------------- #
# define the von Mises with an expression:
num = 1.0
denom = (2 * np.pi) * i0(kappa_)
conss = num / denom

y11 = stats.vonmises.logpdf(X_samples, kappa=kappa_, loc=loc_)
y22 = np.log(conss * np.exp(kappa_ * CS.dot(loc_cs).T))
# plot the logarithm of the pdf of the von Mises distribution sample:
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(y11, 'r', label='yPred')
ax.plot(y22, 'b', label='yObs')
ax.legend()

sy1 = -np.sum(y11)
sy2 = -np.sum(y22)

#return X, X_samples
def bessel_(x):
    bessel_.NumParam = 0
    bessel_.NumVars = 1

    return np.i0(x)
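A hedged sanity check (the grid and parameters below are made up): the density defined by vonmises at the start of this example integrates to amp over one full period.

import numpy as np
xs = np.linspace(-np.pi, np.pi, 20001)
pdf = vonmises(xs, amp=1.0, cen=0.3, kappa=2.0)
print(np.trapz(pdf, xs))    # ~= 1.0, i.e. the amplitude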
Example #14
#width = pulseWidth(period, meanFref,randomNum)
count = 0
#1/width/width > 100
#exp(-pow(phase,2)/2.0/width/width)/width/sqrt(2.* 3.14159265) * randarr[phasenum]* exp(gasdev(&idum)) / e;
profile = np.zeros(phaseNum)
for i in range(phaseNum):
    tval = i*tsamp 
    phase = tval/period % 1.
    if (phase < 0.) : phase += 1.
    phase -= 0.5
    
    #phasenum = (int)(tval/head->p0-fmod(tval/head->p0, 1.));
    if 1/width/width > 100:
        profile[i] = math.exp(-pow(phase,2)/2.0/width/width)/width/np.sqrt(2.* math.pi) #* randarr[phasenum]* exp(gasdev(&idum)) / e
    else:
        profile[i] = math.exp((1/width/width) * math.cos(phase))/2./math.pi/np.i0((1/width/width)) # * randarr[phasenum] * exp(gasdev(&idum)) / e;

for i in range(phaseNum):
    if profile[i] >= max(profile)*0.7:
        count += 1
    #profile[i] = math.exp((1/width/width) * math.cos(phase))/2./math.pi/np.i0((1/width/width)) # * randarr[phasenum] * exp(gasdev(&idum)) / e;
print(sum(profile)*1./phaseNum)
profile = profile * 0.001 / (sum(profile)*1./phaseNum)
print(count, len(profile), count*1./float(len(profile)))
print(int(period / tsamp))
print('width: %s' % (np.size(np.where(profile > 0.00001))/(phaseNum*1.)))
print('num of profile > 0: %s' % (np.size(np.where(profile > 0.00001))))
print('width: %s, phaseNum: %s' % (width, phaseNum))
plt.plot(profile)
#plt.scatter(np.arange(len(profile)),profile)
plt.xlabel('phaseNum')
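A hedged numerical note on the branch above (the width is chosen arbitrarily): for a narrow pulse the concentration kappa = 1/width**2 is large and the von Mises profile becomes nearly Gaussian, which is what the 1/width/width > 100 branch exploits.

import numpy as np
width = 0.05
kappa = 1.0 / width**2
phase = np.linspace(-0.1, 0.1, 5)
vm = np.exp(kappa * np.cos(phase)) / (2 * np.pi * np.i0(kappa))
gauss = np.exp(-phase**2 / (2 * width**2)) / (width * np.sqrt(2 * np.pi))
print(np.allclose(vm, gauss, rtol=1e-2))    # True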
Example #15
    def smc(self, order, N, NT=1.1, resamp='mult', verbose=False):
        """
        SMC algorithm to estimate (log)Z of the classical XY model with 
        free boundary conditions.
        
        Parameters
        ----------
        order : 1-D array_like
            The order in which to add the random variables x, flat index.
        N : int
            The number of particles used to estimate Z.
        NT : float
            Threshold for ESS-based resampling, in (0, 1] or 1.1. Resample if ESS < NT*N (NT=1.1 forces resampling at every step).
        resamp : string
            Type of resampling scheme {mult, res, strat, sys}.
        verbose : bool
            If True, the output changes to (logZ, xMean, ESS).
        
        Output
        ------
        logZ : float
            Estimate of (log)Z in double precision.
        """
        # Init variables
        nx = self.sz
        ny = self.sz
        logZ = np.zeros( nx*ny )
        indSorted = order.argsort()
        orderSorted = order[indSorted]
        # SMC specific
        trajectory = np.zeros( (N, len(order)) )
        ancestors = np.zeros( N, dtype=int )
        nu = np.zeros( N )
        tempNu = np.zeros( N )
        ess = np.zeros( len(order)-1 )
        iter = 0

        # -------
        #   SMC
        # -------
        # First iteration
        ix, iy = hlp.unravel_index( order[0], (nx,ny) )
        tempMean = 0.
        tempDispersion = 0.
        trajectory[:,0] = np.random.vonmises(tempMean, tempDispersion, N)
        # Log-trick update of adjustment multipliers and logZ
        tempDispersion = np.zeros(N)

        for iSMC in range( 1, len(order) ):
            # Resampling with log-trick update
            nu += np.log(2 * np.pi * np.i0(tempDispersion))
            nuMax = np.max(nu)
            tempNu = np.exp( nu - nuMax )
            c = np.sum(tempNu)
            tempNu /= c
            ess[iSMC-1] = 1 / (np.sum(tempNu**2))

            if ess[iSMC-1] < NT*float(N):
                nu = np.exp( nu - nuMax )
                if iter > 0:
                    logZ[iter] = logZ[iter-1] + nuMax + np.log( np.sum(nu) ) - np.log(N)
                else:
                    logZ[iter] = nuMax + np.log( np.sum(nu) ) - np.log(N)
                c = np.sum(nu)
                nu /= c
                ancestors = hlp.resampling( nu, scheme=resamp )
                nu = np.zeros( N )
                trajectory[:,:iSMC] = trajectory[ancestors, :iSMC]
                iter += 1

            # Calculate optimal proposal and adjustment multipliers
            ix, iy = hlp.unravel_index( order[iSMC], (nx,ny) )
            tempMean = np.zeros( N )
            tempDispersion = np.zeros( N )
            # Fold in the von Mises contribution from each already-added neighbour,
            # visiting x-1, x+1, y-1 and y+1 with wrap-around at the lattice edge.
            for neighbour in ( ((ix-1) % nx, iy), ((ix+1) % nx, iy),
                               (ix, (iy-1) % ny), (ix, (iy+1) % ny) ):
                tempInd = hlp.ravel_multi_index( neighbour, (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos( -tempMean + trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2( Y, X )
            for iParticle in range(N):
                trajectory[iParticle, iSMC] = hlp.vonmises(tempMean[iParticle], tempDispersion[iParticle])

        nu += np.log(2 * np.pi * np.i0(tempDispersion))
        nuMax = np.max(nu)
        nu = np.exp( nu - nuMax )
        logZ[iter] = logZ[iter-1] + nuMax + np.log( np.sum(nu) ) - np.log(N)
        
        if verbose:
            c = np.sum(nu)
            nu /= c
            trajMean = np.mean( (np.tile(nu, (len(order),1))).T*trajectory, axis=0 )
            return logZ, trajMean[order].reshape( (nx,ny) ), ess
        else:
            return logZ[iter]
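A hedged sketch of invoking the sampler (the surrounding class is not shown in the original, and the raster-scan order is an assumption):

import numpy as np
# model is an instance of the class that owns smc(); its construction is outside this snippet
order = np.arange(model.sz * model.sz)    # add the lattice sites in raster-scan order
logZ_estimate = model.smc(order, N=1000, NT=0.5, resamp='sys')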
Example #16
#   General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with BASILISK.  If not, see <http://www.gnu.org/licenses/>.

"""
Von Mises sampler and density.
"""



from math import pi, acos, cos, sin
from numpy import array, log, exp, i0     # i0 is the modified Bessel function of the first kind; array is used by to_coords
from random import vonmisesvariate

i0_float = lambda x: float(i0(float(x)))

def to_radian(coords):
   if coords[1]>0:
      return acos(coords[0])
   else:
      return 2*pi-acos(coords[0])

def to_coords(angle):
   return array([cos(angle), sin(angle)])


def mod2pi(x):
   if x >= 2*pi:
      return mod2pi(x - 2*pi) 
   if x < 0:
Example #17
    lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),
    lambda x: abs(x),
    lambda x: x > 0.5,
    lambda x: x.rechunk((4, 4, 4)),
    lambda x: x.rechunk((2, 2, 1)),
    pytest.param(lambda x: da.einsum("ijk,ijk", x, x),
                 marks=pytest.mark.xfail(
                     reason='depends on resolution of https://github.com/numpy/numpy/issues/12974')),
    lambda x: np.isreal(x),
    lambda x: np.iscomplex(x),
    lambda x: np.isneginf(x),
    lambda x: np.isposinf(x),
    lambda x: np.real(x),
    lambda x: np.imag(x),
    lambda x: np.fix(x),
    lambda x: np.i0(x.reshape((24,))),
    lambda x: np.sinc(x),
    lambda x: np.nan_to_num(x),
]


@pytest.mark.parametrize('func', functions)
def test_basic(func):
    c = cupy.random.random((2, 3, 4))
    n = c.get()
    dc = da.from_array(c, chunks=(1, 2, 2), asarray=False)
    dn = da.from_array(n, chunks=(1, 2, 2))

    ddc = func(dc)
    ddn = func(dn)
Example #18
def get_angle_biased(x, k):
    # Draw a random reference angle, then evaluate a von Mises density with
    # concentration k, centred on that reference angle, at x.
    random_angle = np.random.uniform(-np.pi, np.pi)
    density = (1 / (2 * np.pi * np.i0(k))) * np.exp(k * np.cos(x - random_angle))
    return density
Example #19
 def forward(ctx, inp, nu):
     ctx._nu = nu
     ctx.save_for_backward(inp)
     return torch.from_numpy(np.i0(inp.detach().numpy()))
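A hedged sketch of the matching backward pass (not in the original source): since the derivative of I0 is I1, the gradient can be formed with scipy.special.i1, and nu receives no gradient.

 def backward(ctx, grad_output):
     # d/dx I0(x) = I1(x); nu was stashed as a plain attribute and is treated as a constant
     from scipy.special import i1
     inp, = ctx.saved_tensors
     return grad_output * torch.from_numpy(i1(inp.detach().numpy())), None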
Example #20
import numpy as np
import matplotlib.pyplot as mp

x = np.linspace(-5, 5, 1001)
y = np.i0(x)
mp.gcf().set_facecolor(np.ones(3) * 240 / 255)
mp.title('Bessel Function', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(x, y, c='dodgerblue', label='Bessel')
mp.legend()
mp.show()
Example #21
def vonmises(x, amp, cen, kappa):
    # "1-d vonmises"
    top = (amp / (np.pi * 2 * np.i0(kappa)))
    bot = np.exp(kappa * np.cos(x / 360.0 * 2.0 * np.pi - cen))
    return top * bot
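A hedged evaluation sketch (values assumed); note that in this variant x is interpreted in degrees while cen is in radians.

import numpy as np
angles_deg = np.arange(0, 360, 10)
y = vonmises(angles_deg, amp=1.0, cen=np.pi/2, kappa=3.0)
print(angles_deg[np.argmax(y)])    # 90, where the peak sits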
Example #22
def tune(plotfreq=False, plottime=False, input_device_index=None):
    # Set up the Kaiser window
    n = np.arange(CHUNK_SIZE) + 0.5  # Assuming CHUNK_SIZE is even
    x = (n - CHUNK_SIZE / 2) / (CHUNK_SIZE / 2)
    window = np.i0(KAISER_BETA * np.sqrt(1 - x ** 2)) / np.i0(KAISER_BETA)

    # Get audio data
    p = pyaudio.PyAudio()
    #device_info = p.get_device_info_by_index(input_device_index)
    #print(device_info)
    stream = p.open(format=FORMAT, channels=1, rate=RATE, input=True,
                    input_device_index=input_device_index,
                    frames_per_buffer=CHUNK_SIZE)

    if plotfreq or plottime:
        # Set up plotting paraphernalia
        plt.ion()
        if plottime:
            figtime = plt.figure()
            axtime = figtime.gca()
        if plotfreq:
            figfreq = plt.figure()
            axfreq = figfreq.gca()

    print('Press return to stop...')

    i = 0
    while 1:
        i += 1

        # Check if something has been input. If so, exit.
        if sys.stdin in select([sys.stdin, ], [], [], 0)[0]:
            # Absorb the input and break
            sys.stdin.readline()
            break

        # Acquire sound data
        snd_data = array('h', stream.read(CHUNK_SIZE))
        signal = np.array(snd_data)
        #if sys.byteorder == 'big':
            #snd_data.byteswap()

        if plottime:
            if i > 1:
                axtime.lines.remove(timeline)
            [timeline, ] = axtime.plot(signal, 'b-')
            figtime.canvas.draw()

        # Apply a Kaiser window on the signal before taking the FFT. This
        # makes the signal look better if it is periodic. Derivatives at the
        # edges of the signal match better, which means that the frequency
        # domain will have fewer side-lobes. However, it does cause each spike
        # to grow a bit broader.
        # One can change the value of beta to tradeoff between side-lobe height
        # and main lobe width.
        signal = signal * window  # multiply out-of-place: the samples are int16, the window is float
        spectrum = np.fft.rfft(signal, int(RATE / RESOLUTION))
        peak = np.argmax(abs(spectrum))         # peak directly gives the
                                                # desired frequency
        # Threshold on the maximum peak present in the signal (meaning we
        # expect the signal to be approximately unimodal)
        if spectrum[peak] > THRESHOLD:
            # Put a band-pass filter in place to look at only those frequencies
            # we want. The desired peak is the harmonic located in the
            # frequency region of interest.
            desired_peak = np.argmax(abs(spectrum[90:550]))
            print(desired_peak)

            if plotfreq:
                try:
                    axfreq.lines.remove(freqline)
                except UnboundLocalError:
                    pass
                [freqline, ] = axfreq.plot(abs(spectrum), 'b-')
                figfreq.canvas.draw()

    stream.stop_stream()
    stream.close()
    p.terminate()
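A hedged side note (CHUNK_SIZE and KAISER_BETA below are placeholders): apart from the half-sample offset in n, the window built at the top of tune() is the standard I0-based Kaiser window, so NumPy's built-in gives an almost identical taper.

import numpy as np
CHUNK_SIZE, KAISER_BETA = 1024, 14.0
approx_window = np.kaiser(CHUNK_SIZE, KAISER_BETA)    # built-in Kaiser window, also defined via np.i0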
Example #23
def myF_2vM1U(theta, *params):
    """
    The first derivative of the log-likelihood function 'l' to be set equal to 
    zero in order to estimate the mixture parameters:
        p1, p2: the weights of the two von Mises distributions on semi-circle
        kappa1, kappa2: the concentrations of the two von Mises distributions
        mu1, mu2: the locations of the two von Mises distributions
        theta:= [ p1, kappa1, mu1, p2, kappa2, mu2 ]
        params:= X_samples: the observations sample 
    The function returns a vector F with the derivatives of 'l' wrt the 
    components of theta. 
    This function is to be called with optimize.fsolve function of scipy:
        roots_ = optimize.fsolve( myF_2vM1U, in_guess, args=my_par )
    """
    # scal_ = 0.5
    ll = 2.0
    x_ = np.array(params).T
    #    print(type(x_))
    #    print('x_=', x_.shape)

    # the unknown parameters:
    p1_ = theta[0]
    kappa1_ = theta[1]
    m1_ = theta[2]
    p2_ = theta[3]
    kappa2_ = theta[4]
    m2_ = theta[5]
    pu_ = 1.0 - p1_ - p2_

    # the 1st von Mises distribution on semi-circle:
    fvm1_ = _vmf_pdf(x_, kappa1_, m1_, ll)
    # fvm1_ = stats.vonmises.pdf( x_, kappa1_, m1_, scal_ )

    # the 2nd von Mises distribution on semi-circle:
    fvm2_ = _vmf_pdf(x_, kappa2_, m2_, ll)
    # fvm2_ = stats.vonmises.pdf( x_, kappa2_, m2_, scal_ )

    # the uniform distribution:
    xu_1 = min(x_)
    xu_2 = (max(x_) - min(x_))
    fu_ = stats.uniform.pdf(x_, loc=xu_1, scale=xu_2)

    # mixture distribution:
    fm_ = p1_ * fvm1_ + p2_ * fvm2_ + pu_ * fu_

    # first derivative wrt weight p1:
    dldp1 = sum(np.divide(np.subtract(fvm1_, fu_), fm_))

    # first derivative wrt weight p2:
    dldp2 = sum(np.divide(np.subtract(fvm2_, fu_), fm_))

    # first derivative wrt location mu1:=
    dldm1 = (ll*kappa1_*p1_)*sum( np.multiply( np.divide( fvm1_, fm_ ), \
                                         np.sin(ll*(x_ - m1_)) ) )

    # first derivative wrt location mu2:=
    dldm2 = (ll*kappa2_*p2_)*sum( np.multiply( np.divide( fvm2_, fm_ ), \
                                         np.sin(ll*(x_ - m2_)) ) )

    # first derivative wrt concentration kappa1:
    Ak1 = (iv(1.0, kappa1_) / i0(kappa1_))
    dldk1 = p1_*sum( np.multiply( np.divide( fvm1_, fm_ ), \
                                 ( np.cos(ll*(x_ - m1_)) - Ak1 ) ) )

    # first derivative wrt concentration kappa2:
    Ak2 = (iv(1.0, kappa2_) / i0(kappa2_))
    dldk2 = p2_*sum( np.multiply( np.divide( fvm2_, fm_ ), \
                                 ( np.cos(ll*(x_ - m2_)) - Ak2 ) ) )

    F = [dldp1[0], dldk1[0], dldm1[0], dldp2[0], dldk2[0], dldm2[0]]

    return F
Example #24
    def c1(cls, th, k1, a, b):
        """First component of the fitted function.
        """

        return a * b * np.exp(k1 * np.cos(th)) / (2. * np.pi * np.i0(k1))
Example #25
left=cutoff1d[0]
right=cutoff1d[1]
if left==0:
    scalefreq=0.0
elif right==1:
    scalefreq=1
else:
    scalefreq=0.5*(left+right)
s=0
kais=[]
for i in range(N):
    # print sp.bessel(0,beta*np.sqrt(1-np.power((i-alpha)/alpha,2)),analog=True)
    # print beta
    c.append(np.cos(np.pi*m[i]*scalefreq))
    kais.append(np.i0(beta*np.sqrt(1-np.power((i-alpha)/alpha,2)))/np.i0(beta))
    FIR.append(h[i]*kais[i])
    s=s+FIR[i]*c[i]



pl.plot(c,label='c')
pl.plot(h,label='h')
pl.plot(kais,label='kais')
pl.plot(FIR,label='FIR')
pl.legend()
pl.figure()

Fh=np.fft.fftshift(np.fft.fft(h))
Fc=np.fft.fftshift(np.fft.fft(c))
Fkais=np.fft.fftshift(np.fft.fft(kais))
Example #26
"""
#!/usr/bin/python

import numpy
from pylab import *

x = numpy.linspace(0, 4, 100)
vals = numpy.i0(x)

plot(x, vals)
show()
"""

# Script to evaluate the response of each angle relative to the imaging angle
import matplotlib.pyplot as plt
import numpy as np
from smart_rotation import smart_rotation
filepath = "/mnt/fileserver/Henry-SPIM/smart_rotation/06142018/sample1/merged/workspace_final3/angularcount_final/"
sr = smart_rotation(24,10)
sr.evaluate_angles(filepath,24,10,0,False)
savepath = "/mnt/fileserver/Henry-SPIM/smart_rotation/06142018/sample1/merged/workspace_final3/figures/"
alpha = 0.2
fig,(a0,a1,a2,a3) = plt.subplots(4,1,sharex = True,figsize=(4.5,3),gridspec_kw={'height_ratios':[12,1,1,1]})
plt.subplots_adjust(left=0,right=1,top=1,bottom=0,hspace=0.03)
linewidth = 1
markersize=2
a0.plot(np.arange(0,360,10),np.divide(sr.a[:,0]/np.pi/2,np.i0(sr.k))*np.exp(np.abs(sr.k))[:,0],'g-o',label="max",lw =linewidth,markersize=markersize)
a0.plot(np.arange(0,360,10),np.divide(sr.a[:,0]/np.pi/2,np.i0(sr.k))*np.exp(-np.abs(sr.k))[:,0],'r-o',label="min",lw=linewidth,markersize=markersize)
a0.grid(False)
a0.legend(loc="upper right")
a0.set_xlim(0,360)
a0.set_ylim(0,15000)
a0.xaxis.tick_top()
#a0.set_xlabel('imaging angle')
a0.set_xlabel('Angle within sample')
axis = [a1,a2]
for i in axis:
    i.set_yticklabels([])
    i.xaxis.set_ticks_position('none')
    i.yaxis.set_ticks_position('none')
    i.set_xlim(0,360)
mini = 1
Example #28
print u"分期付款"
#pmt输入为利率和期数,总价,输出每期钱数
print "Payment",np.pmt(0.10/12,12*30,100000)
#nper参数为贷款利率,固定的月供和贷款额,输出付款期数
print "Number of payments", np.nper(0.10/12,-100,9000)
#rate参数为付款期数,每期付款资金,现值和终值计算利率
print "Interest rate",12*np.rate(167,-100,9000,0)

print u"窗函数"
#bartlett函数可以计算巴特利特窗
window = np.bartlett(42)
print "bartlett",window
#blackman函数返回布莱克曼窗。该函数唯一的参数为输出点的数量。如果数量
#为0或小于0,则返回一个空数组。
window = np.blackman(10)
print "blackman",window
# hamming函数返回汉明窗。该函数唯一的参数为输出点的数量。如果数量为0或
# 小于0,则返回一个空数组。
window = np.hamming(42)
print "hamming",window
# kaiser函数返回凯泽窗。该函数的第一个参
# 数为输出点的数量。如果数量为0或小于0,则返回一个空数组。第二个参数为β值。
window = np.kaiser(42,14)
print "kaiser",window
# 以i0 表示第一类修正的零阶贝塞尔函数。
x= np.linspace(0,4,100)
vals = np.i0(x)
print "i0",vals
val = np.sinc(x)
print "sinc",val
Example #29
 def likelyhood(self, theta, r):
     """Likelihood.
     Computes the likelihood that an ellipticity with angle theta at distance r is recorded."""
     br = self.b0 * r**self.alpha
     expar = 1. / (2. * numpy.pi * numpy.i0(br))
     return numpy.exp(-(br * numpy.cos(2*theta))) * expar