Code Example #1
File: nee.py Project: RobieH/honours
def main():

    obj=plt.imread('jerichoObject.bmp')
    ref=plt.imread('jerichoRef.bmp')

    holo=obj-ref

    Kreal=np.empty(holo.shape)+0j
    Kimag=np.empty(holo.shape)+0j
    temp=np.empty(holo.shape)+0j

    wavelength=405e-9
    k=2*np.pi/(wavelength)
    z=250e-6
    #z=13e-3-250e-6

    distX=6e-6
    distY=6e-6

    n=float(holo.shape[0])
    m=float(holo.shape[1])

    a = np.arange(0,n)
    b = np.arange(0,m)

    '''create all r vectors'''
    R = np.empty((holo.shape[0], holo.shape[1], 3))
    R[:,:,0] = np.repeat(np.arange(holo.shape[0]), holo.shape[1]).reshape(holo.shape) * distX
    R[:,:,1] = np.arange(holo.shape[1]) * distY
    R[:,:,2] = z

    '''create all ksi vectors'''
    KSI = np.empty((holo.shape[0], holo.shape[1], 3))
    KSI[:,:,0] = np.repeat(np.arange(holo.shape[0]), holo.shape[1]).reshape(holo.shape) * distX
    KSI[:,:,1] = np.arange(holo.shape[1]) * distY
    KSI[:,:,2] = z

    # vectorized 2-norm; see http://stackoverflow.com/a/7741976/4323
    KSInorm = np.sum(np.abs(KSI)**2,axis=-1)**(1./2)

    # loop over the output arrays (same shape as holo), rows first
    # this loop populates Kreal and Kimag one pixel at a time (so it can be parallelized)
    for x in xrange(holo.shape[0]):
        for y in xrange(holo.shape[1]):

            print(x, y)

            KSIdotR = np.dot(KSI, R[x,y])
            temp = holo * np.exp(1j * k * KSIdotR / KSInorm)

            '''interpolate the integrand so that we can evaluate the integral'''
            temp2 = rbs(a, b, temp.real)
            Kreal[x,y] = temp2.integral(0, n, 0, m)
            temp3 = rbs(a, b, temp.imag)
            Kimag[x,y] = temp3.integral(0, n, 0, m)


    Kreal.dump('Kreal.dat')
    Kimag.dump('Kimag.dat')
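
The example above (and Code Example #2 below) fits a RectBivariateSpline to the sampled integrand and uses its .integral() method to approximate the 2-D integral over the whole grid. A minimal, self-contained sketch of that pattern, with a made-up array f standing in for the hologram integrand:

import numpy as np
from scipy.interpolate import RectBivariateSpline as rbs

n, m = 64, 48
a = np.arange(n)
b = np.arange(m)
f = np.cos(a[:, None] / 7.0) * np.sin(b[None, :] / 5.0)  # stand-in for temp.real

spline = rbs(a, b, f)                        # bicubic spline on the rectangular grid
value = spline.integral(0, n - 1, 0, m - 1)  # definite integral over the grid extent
print(value)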
Code Example #2
File: hh.py Project: RobieH/honours
def func(smallX,smallY):
    ''' Function used to calculate the integral '''
    print(smallX,smallY)
    temp2=ne.evaluate('temp*exp((1j*k*(smallX*Xprime+smallY*Yprime))/L)')
    temp3=rbs(i,j,temp2.real)
    Kreal[smallX,smallY]=temp3.integral(0,kx,0,ky)
    temp4=rbs(i,j,temp2.imag)
    Kimag[smallX,smallY]=temp4.integral(0,kx,0,ky)
Code Example #3
 def __init__(self, theGrid, phiGrid, tauth, tauph):
     self.theGrid = theGrid
     self.phiGrid = phiGrid
     self.tauTheFuncs = [
         rbs(theGrid, phiGrid, tauth[..., i]) for i in [0, 1, 2]
     ]
     self.tauPhiFuncs = [
         rbs(theGrid, phiGrid, tauph[..., i]) for i in [0, 1, 2]
     ]
Code Example #4
 def get(self, rs, redshift, logdens):
     xic = np.array([
         rbs(-self.logdens_list, -self.redshift_list,
             self.pred_table[:, :, i])(-logdens, -redshift)
         for i in range(66)
     ])
     return np.exp(ius(self.logrscale, xic, ext=3)(np.log(rs)))
Code Example #5
File: utils.py Project: yangyangoceanographer/popy
def line_sample2d(x,y,z,x1,y1):
    """sample z along a path [x,y]
    x,y in pixel coordinates"""
    from scipy.interpolate import RectBivariateSpline as rbs
    # Extract the values along the line, using cubic interpolation
    f = rbs(x,y,z.T)
    return f.ev(x1,y1)
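
line_sample2d relies on RectBivariateSpline.ev, which evaluates the spline at arbitrary paired points rather than on a grid. A short, self-contained sketch of the same call, with a random array standing in for a real field:

import numpy as np
from scipy.interpolate import RectBivariateSpline as rbs

z = np.random.rand(50, 40)                # image-like array; rows are y, columns are x
x = np.arange(z.shape[1])
y = np.arange(z.shape[0])

x1 = np.linspace(0, z.shape[1] - 1, 100)  # sampling path in pixel coordinates
y1 = np.linspace(0, z.shape[0] - 1, 100)

f = rbs(x, y, z.T)                        # transpose so the first spline axis is x, as above
samples = f.ev(x1, y1)                    # pointwise evaluation along the path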
Code Example #6
 def getNoInterpol(self, redshift, logdens):
     xic = np.array([
         rbs(-self.logdens_list, -self.redshift_list,
             self.pred_table[:, :, i])(-logdens, -redshift, grid=False)
         for i in range(66)
     ])
     return np.exp(xic)
Code Example #7
File: ee.py Project: RobieH/honours
def main():
    '''Using numba to try and make this as fast as possible, still very slow. '''

    obj=plt.imread('jerichoObject.bmp')
    ref=plt.imread('jerichoRef.bmp')

    img=obj-ref

    K=np.empty(img.shape)+0j
    temp=np.empty(img.shape)+0j

    wavelength=405e-9
    k=2*np.pi/(wavelength)
    z=250e-6
    #z=13e-3-250e-6

    distX=6e-6
    distY=6e-6

    n=float(img.shape[0])
    m=float(img.shape[1])

    a = np.arange(0,n)
    b = np.arange(0,m)

    first=time.time()

    for i in xrange(K.shape[0]):
        for j in xrange(K.shape[1]):

            print(i,j)
            '''create an r vector '''
            r=(i*distX,j*distY,z)

            for x in xrange(img.shape[0]):
                for y in xrange(img.shape[1]):
                    '''create a ksi vector, then calculate
                       its norm and the dot product of r and ksi'''
                    ksi=(x*distX,y*distY,z)
                    ksiNorm=np.linalg.norm(ksi)
                    ksiDotR=float(np.dot(ksi,r))

                    '''calculate the integrand'''
                    temp[x,y]=img[x,y]*np.exp(1j*k*ksiDotR/ksiNorm)

            '''interpolate the integrand so that we can evaluate the integral'''
            temp2=rbs(a,b,temp.real)
            K[i,j]=temp2.integral(0,n,0,m)

            timeTook=time.time()-first

            print(timeTook)


    K.dump('K.dat')

    kInt=K.real*K.real+K.imag*K.imag

    plt.imshow(kInt,cmap=plt.cm.Greys_r)
Code Example #8
File: zernike.py Project: GangWithWind/EXAOSIM
    def __init__(self, phase, pix=True):
        self.data0 = phase

        sz = phase.shape[0]
        x = np.arange(sz) - (sz - 1) / 2
        if not pix:
            x = x / (sz - 1) * 2

        self.fun = rbs(x, x, phase)
Code Example #9
    def set_cosmology(self, cosmo):
        coeff_rec = np.dot(self.gps.predict(np.atleast_2d(
            cosmo.get_cosmology())), self.eigdata).reshape(21, 41)

        #pred_table = []
        # for i in range(21):
        #	self.spl[1][:-4] = coeff_rec[i,]
        #	pred_table.append(interpolate.splev(self.logxscale, self.s1pl))
        self.xinl = rbs(-self.redshift_list, self.logxscale, coeff_rec)
Code Example #10
def interpolate(array, Nx, Ny, Lx, Ly, newNx, newNy, xmin=0, ymin=0):
    from scipy.interpolate import RectBivariateSpline as rbs
    x = np.linspace(xmin, Lx, Nx)
    y = np.linspace(ymin, Ly, Ny)
    newx = np.linspace(xmin, Lx, newNx)
    newy = np.linspace(ymin, Ly, newNy)
    interpolatedobject = rbs(x, y, array)
    # evaluate interpolatedobject at newx, newy points
    return interpolatedobject(newx, newy)
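
The helper above regrids an array by evaluating the spline on new axes; calling the spline object directly is the same grid evaluation. A short usage sketch with a hypothetical coarse field:

import numpy as np
from scipy.interpolate import RectBivariateSpline as rbs

coarse = np.random.rand(16, 12)           # hypothetical field on a 16 x 12 grid
x = np.linspace(0.0, 1.0, 16)
y = np.linspace(0.0, 2.0, 12)

newx = np.linspace(0.0, 1.0, 64)
newy = np.linspace(0.0, 2.0, 48)

fine = rbs(x, y, coarse)(newx, newy)      # grid evaluation; result has shape (64, 48)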
Code Example #11
 def set_cosmology(self, cosmo):
     self.cosmo_now = cosmo
     cpara = cosmo.get_cosmology()
     self.coeff_rec = np.dot(self.gps.predict(np.atleast_2d(cpara)),
                             self.eigdata).reshape(21, 13, 3)
     self.coeff_rec_dm = np.dot(self.gps_dm.predict(np.atleast_2d(cpara)),
                                self.eigdata_dm).reshape(21, 2)
     self.sd0 = self.sd.get(cosmo)
     # self.D0s = np.array([cosmo.Dgrowth_from_z(z) for z in self.redshift_list])
     self.coeff1_spline = rbs(-self.redshift_list, -self.logdens_list,
                              self.coeff_rec[:, :, 0])
     self.coeff2_spline = rbs(-self.redshift_list, -self.logdens_list,
                              self.coeff_rec[:, :, 1])
     self.coeff3_spline = rbs(-self.redshift_list, -self.logdens_list,
                              self.coeff_rec[:, :, 2])
     self.coeff2_spline_dm = ius(-self.redshift_list, self.coeff_rec_dm[:, 0])
     self.coeff3_spline_dm = ius(-self.redshift_list, self.coeff_rec_dm[:, 1])
Code Example #12
 def getNoInterpol(self, redshift, logdens1, logdens2):
     sindex = self.redshift_to_index(redshift)
     # dindex1 = self.logdens_to_index(logdens1)
     # dindex2 = self.logdens_to_index(logdens2)
     # ins = [dindex1*np.ones(21),dindex2*np.ones(21),sindex*np.ones(21),range(21)]
     if sindex <= 0:
         s0 = 0
         xia0 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s0, i])(-logdens1,
                                            -logdens2,
                                            grid=False) for i in range(21)
         ])
         return xia0
     elif sindex >= 20:
         s0 = 20
         xia0 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s0, i])(-logdens1,
                                            -logdens2,
                                            grid=False) for i in range(21)
         ])
         return xia0
     else:
         s0 = int(sindex)
         s1 = s0 + 1
         u = sindex - s0
         xia0 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s0, i])(-logdens1,
                                            -logdens2,
                                            grid=False) for i in range(21)
         ])
         xia1 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s1, i])(-logdens1,
                                            -logdens2,
                                            grid=False) for i in range(21)
         ])
         xia = (1 - u) * xia0 + u * xia1
         return xia
Code Example #13
def get_xiauto_mass_avg(emu, rs, M1min, M1max, M2min, M2max, z):
    '''
    Average the halo-halo correlation function over the given mass ranges and return the mass-function-weighted mean.
    '''
    from scipy.interpolate import InterpolatedUnivariateSpline as ius
    from scipy.interpolate import RectBivariateSpline as rbs
    from scipy.integrate import dblquad

    # Parameters
    epsabs = 1e-3  # Integration accuracy
    nM = 6  # Number of halo-mass bins in each of M1 and M2 directions

    # Calculations
    nr = len(rs)

    # Number densities of haloes in each sample
    n1 = ndenshalo(emu, M1min, M1max, z)
    n2 = ndenshalo(emu, M2min, M2max, z)

    # Arrays for halo masses
    M1s = mead.logspace(M1min, M1max, nM)
    M2s = mead.logspace(M2min, M2max, nM)

    # Get mass function interpolation
    Ms = emu.massfunc.Mlist
    dndM = emu.massfunc.get_dndM(z)
    log_dndM_interp = ius(np.log(Ms), np.log(dndM))

    # Loop over radii
    xiauto_avg = np.zeros((nr))
    for ir, r in enumerate(rs):

        # Get correlation function interpolation
        # Note that this is not necessarily symmetric because M1, M2 run over different ranges
        xiauto_mass = np.zeros((nM, nM))
        for iM1, M1 in enumerate(M1s):
            for iM2, M2 in enumerate(M2s):
                xiauto_mass[iM1, iM2] = emu.get_xiauto_mass(r, M1, M2, z)
        xiauto_interp = rbs(np.log(M1s), np.log(M2s), xiauto_mass)

        # Integrate interpolated functions
        xiauto_avg[ir], _ = dblquad(
            lambda M1, M2: xiauto_interp(np.log(M1), np.log(M2)) * np.exp(
                log_dndM_interp(np.log(M1)) + log_dndM_interp(np.log(M2))),
            M1min,
            M1max,
            lambda M1: M2min,
            lambda M1: M2max,
            epsabs=epsabs)

    return xiauto_avg / (n1 * n2)
Code Example #14
File: streamfunction.py Project: tokasamwin/Nova
 def upsample(self, sample):
     if sample > 1:
         '''
         EQ(self,n=sample*self.n)
         self.space()
         '''
         from scipy.interpolate import RectBivariateSpline as rbs
         sample = int(float(sample))  # np.int/np.float are deprecated aliases of the builtins
         interp_psi = rbs(self.r, self.z, self.psi)
         self.nr, self.nz = sample * self.nr, sample * self.nz
         self.r = np.linspace(self.r[0], self.r[-1], self.nr)
         self.z = np.linspace(self.z[0], self.z[-1], self.nz)
         self.psi = interp_psi(self.r, self.z, dx=0, dy=0)
         self.space()
Code Example #15
 def get(self, rs, redshift, logdens1, logdens2):
     sindex = self.redshift_to_index(redshift)
     # dindex1 = self.logdens_to_index(logdens1)
     # dindex2 = self.logdens_to_index(logdens2)
     # ins = [dindex1*np.ones(21),dindex2*np.ones(21),sindex*np.ones(21),range(21)]
     if sindex <= 0:
         s0 = 0
         xia0 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s0, i])(-logdens1, -logdens2)
             for i in range(21)
         ])
         return ius(self.logrscale, xia0, ext=3)(np.log(rs))
     elif sindex >= 20:
         s0 = 20
         xia0 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s0, i])(-logdens1, -logdens2)
             for i in range(21)
         ])
         return ius(self.logrscale, xia0, ext=3)(np.log(rs))
     else:
         s0 = int(sindex)
         s1 = s0 + 1
         u = sindex - s0
         xia0 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s0, i])(-logdens1, -logdens2)
             for i in range(21)
         ])
         xia1 = np.array([
             rbs(-self.logdens_list, -self.logdens_list,
                 self.xih_mat[:, :, s1, i])(-logdens1, -logdens2)
             for i in range(21)
         ])
         xia = (1 - u) * xia0 + u * xia1
         return ius(self.logrscale, xia, ext=3)(np.log(rs))
Code Example #16
def rectGridInt(data, grid1Col, grid2Col, newGrid1, newGrid2):
    g1 = np.unique(data[:,grid1Col])
    g2 = np.unique(data[:,grid2Col])
    c1 = g1.shape[0]
    c2 = g2.shape[0]

    ng1 = np.linspace(g1.min(),g1.max(),newGrid1)
    ng2 = np.linspace(g2.min(),g2.max(),newGrid2)
    ng1m, ng2m = np.meshgrid(ng1, ng2, indexing='ij')

    result = []
    for i in range(data.shape[1]):
        if i==grid1Col:
            res = ng1m.reshape(-1)
        elif i==grid2Col:
            res = ng2m.reshape(-1)
        else:
            res = rbs(g1,g2,data[:,i].reshape(c1,c2))(ng1, ng2).reshape(-1)
        result.append(res)
    return np.column_stack(result)
Code Example #17
File: seds.py Project: rit-rsz/bethermin12_sim
    def __init__(self, Om0=0.315, H0=67.7, zmin=0.5, zmax=7.0, ninterp=100):
        """ Initializer

        Parameters
        ----------
        Om0: float
          Matter density parameter

        H0: float
          Hubble constant, in km / s / Mpc

        zmin: float
          Minimum redshift supported.

        zmax: float
          Maximum redshift supported.

        ninterp: int
          Number of interpolation points for luminosity distance.

        """

        from astropy.cosmology import FlatLambdaCDM
        from astropy.units import Quantity
        import astropy.io.fits as fits
        from pkg_resources import resource_filename
        from scipy.interpolate import RectBivariateSpline as rbs
        from scipy.interpolate import interp1d

        self._zmin = float(zmin)
        self._zmax = float(zmax)
        self._Om0 = float(Om0)
        self._H0 = float(H0)
        self._ninterp = int(ninterp)

        if self._zmin == self._zmax:
            raise ValueError("No range between zmin and zmax")
        if self._zmin > self._zmax:
            self._zmin, self._zmax = self._zmax, self._zmin
        if self._zmin < 0.0:
            raise ValueError("zmin must be >= 0: {:f}".format(self._zmin))
        if self._Om0 <= 0.0:
            raise ValueError("Om0 must be positive: {:f}".format(self._Om0))
        if self._H0 <= 0.0:
            raise ValueError("H0 must be positive: {:f}".format(self._H0))
        if self._ninterp <= 0:
            raise ValueError("Ninterp must be > 0: {:d}".format(self._ninterp))

        # Set up luminosity distance interpolant.  We actually
        # interpolate log((1+z) / (4 pi d_L^2)) in log(1+z)
        cos = FlatLambdaCDM(H0=self._H0, Om0=self._Om0, Tcmb0=0.0, Neff=0.0)
        zrange = np.linspace(self._zmin, self._zmax, self._ninterp)
        mpc_in_cm = 3.0857e24
        prefac = 1.0 / (4 * math.pi * mpc_in_cm**2)
        lumdist = cos.luminosity_distance(zrange)
        if isinstance(lumdist, Quantity):
            lumdist = lumdist.value
        dlval = prefac * (1.0 + zrange) / lumdist**2
        self._dlfac = interp1d(np.log(1 + zrange), np.log(dlval))

        # Read in the data products, and set up interpolations on them
        sb_tpl = resource_filename(__name__, 'resources/SED_sb.fits')
        hdu = fits.open(sb_tpl)
        dat = hdu['SEDS'].data
        hdu.close()
        self._sblam = dat['LAMBDA'][0]
        self._sbumean = dat['UMEAN'][0]
        arg = np.argsort(self._sbumean)
        self._sbumean = self._sbumean[arg]
        self._sbrange = np.array([self._sbumean[0], self._sbumean[-1]])
        self._sbseds = dat['SEDS'][0, :, :].transpose()[arg, :]
        self._sbinterp = rbs(self._sbumean,
                             self._sblam,
                             self._sbseds,
                             kx=1,
                             ky=1)

        ms_tpl = resource_filename(__name__, 'resources/SED_ms.fits')
        hdu = fits.open(ms_tpl)
        dat = hdu['SEDS'].data
        hdu.close()
        self._mslam = dat['LAMBDA'][0]
        self._msumean = dat['UMEAN'][0]
        arg = np.argsort(self._msumean)
        self._msumean = self._msumean[arg]
        self._msrange = np.array([self._msumean[0], self._msumean[-1]])
        self._msseds = dat['SEDS'][0, :, :].transpose()[arg, :]
        self._msinterp = []
        self._msinterp = rbs(self._msumean,
                             self._mslam,
                             self._msseds,
                             kx=1,
                             ky=1)
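
Both SED tables above are wrapped in splines with kx=1, ky=1, i.e. bilinear interpolation between table entries. A self-contained sketch of that construction, with made-up axes and a random table standing in for the FITS data:

import numpy as np
from scipy.interpolate import RectBivariateSpline as rbs

umean = np.linspace(0.0, 1.0, 10)              # hypothetical <U> axis
lam = np.linspace(100.0, 1000.0, 200)          # hypothetical wavelength axis
seds = np.random.rand(umean.size, lam.size)    # stand-in for the SED table

interp = rbs(umean, lam, seds, kx=1, ky=1)     # kx = ky = 1 gives bilinear interpolation
vals = interp(0.37, np.array([250.0, 500.0]))  # grid evaluation; shape (1, 2)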
Code Example #18
File: chabrier.py Project: rschroder/ongp
 def get_sp_he(self, lgp, lgt):
     return rbs(self.logpvals, self.logtvals, self.data['he']['sp'],
                **self.spline_kwargs)(lgp, lgt, grid=False)
Code Example #19
def metastable_config(b,
                      a,
                      xsi,
                      Ke,
                      Ks,
                      taup,
                      disl_type,
                      wpfunc=None,
                      wmin=5,
                      wmax=1000,
                      tau_frac_min=0.05,
                      tau_frac_max=0.3,
                      ntau=20,
                      in_GPa=True,
                      nh=100,
                      nw=200,
                      fit_kocks=True):
    '''Find the kink-pair geometry (h, w) at which the kink-pair energy is
    at a critical point, i.e. dH/dw = dH/dh = 0
    
    <xsi>: dislocation width (float)
    <wmin>: minimum kink-pair separation, in angstroms 
    <wmax>: maximum kink-pair separation
    <a>: dislocation spacing, in angstroms. If <None>, use the Burgers vector 
    <tau_frac_min>: smallest stress at which to calculate metastable shape, as
    a fraction of <taup>
    <tau_frac_max>: largest stress, as fraction of <taup>
    <ntau>; <nh>; <nw>: number of stress, height, and width increments to use
    '''

    if wpfunc is None:
        # construct a simple sine potential using the Peierls stress and
        # dislocation geometry
        wpmax = taup * b**2 / np.pi
        if in_GPa:
            wpmax /= 160.2

        wpfunc = simple_wp(wpmax, a)

    # calculate the core size parameter
    if disl_type == 'edge':
        rho = self_consistent_rho(Ke, Ks, b, a, taup)
    elif disl_type == 'screw':
        rho = self_consistent_rho(Ks, Ke, b, a, taup)

    # create kink-pair energy function
    kp_func = DH_kink_pair_mappable(disl_type, Ke, Ks, b, a, rho, wpfunc, taup)

    # calculate critical shape in specified range of stresses
    critical_values = []
    stresses = np.linspace(tau_frac_min * taup, tau_frac_max * taup, ntau)
    w = np.linspace(wmin, wmax, nw)
    h = np.linspace(0.1, a, nh)
    for s in stresses:
        # calculate kink-pair energies at stress <s> for all kink-pair widths
        # and heights
        Ekp = np.zeros((len(w), len(h)))
        for i, wi in enumerate(w):
            for j, hj in enumerate(h):
                Ekp[i, j] = kp_func(hj, wi, s)

        # calculate width and height derivatives of the kink-pair energy
        dEdw, dEdh = np.gradient(Ekp)

        # construct spline-fits for derivative functions and find saddle point
        fw = rbs(w, h, dEdw)
        fh = rbs(w, h, dEdh)
        fwi = lambda x: abs(fw(*x)[0, 0])
        fhi = lambda x: abs(fh(*x)[0, 0])
        fci = lambda x: fhi(x) + fwi(x)
        optvals = opt.brute(fci, [[2 * wmin, wmax], [0.2 * a, a]])

        critical_values.append([s] + list(optvals) +
                               [kp_func(optvals[1], optvals[0], s)])

    critical_values = np.array(critical_values)

    # fit the shape of the activation enthalpy, if specified by user
    if fit_kocks:
        par, err = kocks_fit(critical_values[:, 0], critical_values[:, -1],
                             taup)
        return critical_values, par, err
    else:
        return critical_values, None, None
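
metastable_config spline-fits the numerical gradients of the kink-pair energy surface and searches for a point where both partial derivatives vanish. A stripped-down sketch of that idea on a toy surface (the surface, ranges, and names below are invented for illustration):

import numpy as np
import scipy.optimize as opt
from scipy.interpolate import RectBivariateSpline as rbs

w = np.linspace(0.0, 4.0, 80)
h = np.linspace(0.0, 4.0, 80)
W, H = np.meshgrid(w, h, indexing='ij')
E = np.sin(W) * np.cos(H)                       # toy energy surface with stationary points

dEdw, dEdh = np.gradient(E, w, h)               # numerical partial derivatives along each axis
fw = rbs(w, h, dEdw)
fh = rbs(w, h, dEdh)

fci = lambda p: abs(fw(*p)[0, 0]) + abs(fh(*p)[0, 0])   # |dE/dw| + |dE/dh|
stationary = opt.brute(fci, [[0.5, 3.5], [0.5, 3.5]])   # grid search plus local polish
print(stationary)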
Code Example #20
File: pes.py Project: tkjacobsen/pardyn
 def __init__(self, x, y, k=2):
     self.kx0, self.kx1 = k, k
     self.rbs = rbs(x[0], x[1], y, kx=k, ky=k)
Code Example #21
File: chabrier.py Project: rschroder/ongp
 def get_st_h(self, lgp, lgt):
     return rbs(self.logpvals, self.logtvals, self.data['h']['st'],
                **self.spline_kwargs)(lgp, lgt, grid=False)
Code Example #22
File: beginnings.py Project: wesleybowman/honours




#temp[xx,yy]=img[xx,yy]*(L/Rprime)**4*np.exp((1j*k*z*Rprime)/L)

kx,ky=K.shape
i=np.arange(0,kx)
j=np.arange(0,ky)

smallX,smallY=np.mgrid[0:kx,0:ky]

#temp2=temp[xx,yy]*np.exp((1j*k*(smallX*Xprime+smallY*Yprime))/L)
temp2=ne.evaluate('temp*exp((1j*k*(smallX*Xprime+smallY*Yprime))/L)')
temp3=rbs(i,j,temp2.real)
Kreal[smallX,smallY]=temp3.integral(0,kx,0,ky)

temp4=rbs(i,j,temp2.imag)
Kimag[smallX,smallY]+=temp4.integral(0,kx,0,ky)

Kreal=Kreal.real
Kimag=Kimag.real*1j
K=Kreal+Kimag
Kint=ne.evaluate('K.real*K.real+K.imag*K.imag')

print(K)
print(Kint)
K.dump('K.dat')

print(time.time()-first)
Code Example #23
work_dir = os.getcwd()

dirs = pd.read_excel(work_dir + "\\ModelData.xlsx", sheetname='Dir')
dirs = dirs.set_index('Variable').T

grid_dir = grid_3D_path = dirs['grid_3D_path'].values[0]
data_dir = dirs['data_dir'].values[0]
elevation_data = data_dir + '\\Mt. Apo Elevations\\Elevations.xyz'

data = pd.read_csv(elevation_data, names=['x', 'y', 'z'], header=None, sep=' ')

x = data['x'].unique()
y = data['y'].unique()
z = data['z'].values.reshape(len(x), len(y), order='F')

rbsf = rbs(x, y, z)

dat = fdata()
dat.grid.read(grid_dir)

node_array = [(n.index, n.position[0], n.position[1], n.position[2])
              for n in dat.grid.nodelist]
node_df = pd.DataFrame(node_array, columns=['ni', 'x', 'y', 'z'])

#flag nodes that lie below the interpolated surface elevation as active
node_df['active'] = node_df.apply(lambda i: i['z'] < rbsf.ev(i['x'], i['y']),
                                  axis=1)

x_columns = node_df['x'].unique()
node_df['is_top'] = False
Code Example #24
def affine_lk_tracker(img, tmp, rect, warp_prev):
    """
    Method to track a template frame in a new image frame using lucas kanade tracking algorithm
    :param img: current image frame to track template
    :param tmp: template image frame
    :param rect: bonding box coordinates marking the template region in template image frame
    :param warp_prev: warping parameters from warping of previous image frame
    :return: warping parameters of current image frame
    """
    # Get start and end point of the bounding rectangle in the template image
    start_point = rect[0], rect[1]
    opp_corner_point = rect[0] + rect[2], rect[1] + rect[3]
    # Define initial error and convergence threshold
    error = 1
    convergence_threshold = 0.001
    # Get x and y values
    x = np.arange(0, tmp.shape[0], 1)
    y = np.arange(0, tmp.shape[1], 1)
    # Interpolate points from top-left to bottom-right corner of the bounding box
    a = np.linspace(start_point[0], opp_corner_point[0], 87)
    b = np.linspace(start_point[1], opp_corner_point[1], 36)
    # Get a mesh grid from these interpolated points
    mesh_a, mesh_b = np.meshgrid(a, b)
    # Get bivariate spline of template image and evaluate intensities over interpolated points
    spline_tmp = rbs(x, y, tmp)
    intensities_tmp = spline_tmp.ev(mesh_b, mesh_a)
    # Adjust brightness scale
    # Uncomment to scale brightness in the roi region
    # img[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]] = adjust_brightness(img, tmp, rect)
    # Get bivariate spline and gradient of current image frame
    spline_img = rbs(x, y, img)
    grad_y, grad_x = np.gradient(img)
    # Get bivariate splines of the gradients
    spline_grad_x = rbs(x, y, grad_x)
    spline_grad_y = rbs(x, y, grad_y)
    # Define a 2x2 jacobian
    jacobian = np.array([[1, 0], [0, 1]])
    count = 0
    # Iterate until convergence
    while np.linalg.norm(error) > convergence_threshold:
        # Interpolate warping points from top-left to bottom-right corner
        warp_a = np.linspace(start_point[0] + warp_prev[0],
                             opp_corner_point[0] + warp_prev[0], 87)
        warp_b = np.linspace(start_point[1] + warp_prev[1],
                             opp_corner_point[1] + warp_prev[1], 36)
        # Get mesh from these interpolated points
        mesh_warp_a, mesh_warp_b = np.meshgrid(warp_a, warp_b)
        # Evaluate intensities over interpolated points in x,y direction for gradients
        intensities_grad_x = spline_grad_x.ev(mesh_warp_b, mesh_warp_a)
        intensities_grad_y = spline_grad_y.ev(mesh_warp_b, mesh_warp_a)
        # Stack the intensities from both the gradients
        intensities_grad = np.vstack(
            (intensities_grad_x.ravel(), intensities_grad_y.ravel())).T
        # Evaluate intensities over the interpolated points for current image frame
        intensities_img = spline_img.ev(mesh_warp_b, mesh_warp_a)
        # Calculate the hessian matrix
        hessian = intensities_grad @ jacobian
        hess = hessian.T @ hessian
        # Calculate the change in intensities from the template image to the current image frame
        change = (intensities_tmp - intensities_img).reshape(-1, 1)
        # Apply huber loss to the intensity error (comment this out to disable)
        change = get_huber_loss(change)
        # Evaluate errors
        try:
            error = np.linalg.inv(hess) @ hessian.T @ change
            warp_prev[0] += error[0, 0]
            warp_prev[1] += error[1, 0]
        except np.linalg.LinAlgError:
            break
        # Increment iteration count
        count += 1
        # Terminate convergence after 200 iterations
        if count > 200:
            break

    return warp_prev
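
affine_lk_tracker builds splines of the current frame and of its gradients, then evaluates them at a warped, sub-pixel mesh with .ev. A stripped-down sketch of just that evaluation step, with a random image standing in for the frame and made-up offsets in place of the warp parameters:

import numpy as np
from scipy.interpolate import RectBivariateSpline as rbs

img = np.random.rand(120, 160)            # hypothetical grayscale frame
x = np.arange(img.shape[0])               # row coordinates
y = np.arange(img.shape[1])               # column coordinates

grad_y, grad_x = np.gradient(img)
spline_img = rbs(x, y, img)
spline_gx = rbs(x, y, grad_x)

a = np.linspace(20.0, 60.0, 30) + 1.5     # warped sample points (offsets are made up)
b = np.linspace(10.0, 40.0, 20) + 0.7
mesh_a, mesh_b = np.meshgrid(a, b)

vals = spline_img.ev(mesh_b, mesh_a)      # sub-pixel intensities at the warped mesh
gx_vals = spline_gx.ev(mesh_b, mesh_a)    # x-gradient at the same points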
Code Example #25
File: dd.py Project: RobieH/honours
    r=(i*distX,j*distY,z)

    for (x,y),value in np.ndenumerate(img):
        ksi=(x*distX,y*distY,z)
        ksiNorm=np.linalg.norm(ksi)
        ksiDotR=np.dot(ksi,r)

        temp[x,y]=img[x,y]*np.exp(1j*k*ksiDotR/ksiNorm)

    temp.dump('temp.dat')
    #tempRavel=temp.ravel()
    #surf=interpol(pts,tempRavel)
    #func=lambda y,x: surf([[x,y]])

    #K[i,j]=dblquad(func,0.0,m,lambda x:0.0, lambda x:n)[0]
    temp2=rbs(a,b,temp.real)
    K[i,j]=temp2.integral(0,n,0,m)

timeTook=time.time()-first

print(timeTook)

K.dump('K.dat')
kInt=K.real*K.real+K.imag*K.imag

plt.imshow(kInt,cmap=plt.cm.Greys_r)
plt.imsave(kInt,cmap=plt.cm.Greys_r)


#a=np.arange(0,n)
#b=np.arange(0,m)
Code Example #26
File: jj.py Project: RobieH/honours
def main(slice,comm,rank,size):

    if rank==0:

        first=time.time()

        obj=plt.imread('jerichoObject.bmp')
        ref=plt.imread('jerichoRef.bmp')

        img=obj-ref

        temp=np.empty(img.shape)+0j
        K=np.empty(img.shape)+0j
        Kreal=np.empty(img.shape)+0j
        Kimag=np.empty(img.shape)+0j

        wavelength=405e-9
        k=2*np.pi/(wavelength)

        ''' L is the distance from the source to the screen '''
        L=13e-3

        n,m=img.shape

        a,b=np.mgrid[0:n,0:m]

        r=ne.evaluate('sqrt(L*L+a*a+b*b)')
        Xprime=ne.evaluate('(a*L)/r')
        Yprime=ne.evaluate('(b*L)/r')
        Rprime=ne.evaluate('(L*L)/r')
        xx=ne.evaluate('(Xprime*L)/Rprime')
        yy=ne.evaluate('(Yprime*L)/Rprime')
        xx=xx.astype(int)
        yy=yy.astype(int)

        ''' z is the slice we want to look at '''
        #z=250e-6
        #z=13e-3-250e-6
        #z=13e-3
        z=slice

        print('Distance: {0}'.format(z))

        temp[xx,yy]=ne.evaluate('img*(L/Rprime)**4*exp((1j*k*z*Rprime)/L)')

        kx,ky=K.shape

        ii=np.arange(kx)
        jj=np.arange(ky)

        print('Total number of Processors in use: {0}'.format(comm.size))

    else:
        kx=None
        ky=None
        L=None
        Xprime=None
        Yprime=None
        temp=None
        Kreal=None
        Kimag=None
        ii=None
        jj=None
        k=None

    comm.Barrier()

    #print('rank:{0} size:{1} \n'.format(comm.rank,comm.size))

    comm.Barrier()

    if rank==0: print('Broadcasting')

    kx=comm.bcast(kx,root=0)
    ky=comm.bcast(ky,root=0)
    L=comm.bcast(L,root=0)
    Xprime=comm.bcast(Xprime,root=0)
    Yprime=comm.bcast(Yprime,root=0)
    temp=comm.bcast(temp,root=0)
    Kreal=comm.bcast(Kreal,root=0)
    Kimag=comm.bcast(Kimag,root=0)
    ii=comm.bcast(ii,root=0)
    jj=comm.bcast(jj,root=0)
    k=comm.bcast(k,root=0)

    if rank==0: print('Done broadcasting')

    comm.Barrier()

    rows = [comm.rank + comm.size * aa for aa in range(int(kx/comm.size)+1) if comm.rank + comm.size*aa < kx]

    rows2 = [comm.rank + comm.size * bb for bb in range(int(ky/comm.size)+1) if comm.rank + comm.size*bb < ky]

    comm.Barrier()

    count=0
    countTot=[]

    for smallX in rows:
        for smallY in rows2:
            #print(smallX,smallY)
            temp2=ne.evaluate('temp*exp((1j*k*(smallX*Xprime+smallY*Yprime))/L)')
            temp3=rbs(ii,jj,temp2.real)
            Kreal[smallX,smallY]=temp3.integral(0,kx,0,ky)
            temp4=rbs(ii,jj,temp2.imag)
            Kimag[smallX,smallY]=temp4.integral(0,kx,0,ky)

            count+=1
            countTot=comm.gather(count)

            try:
                print('Done with: {0} elements'.format(sum(countTot)))
            except:
                pass

    comm.Barrier()

    if rank==0:
        Kreal.dump('Kreal{}.dat'.format(slice))
        Kimag.dump('Kimag{}.dat'.format(slice))

    print(time.time()-first)
Code Example #27
File: seds.py Project: aconley/bethermin12_sim
    def __init__(self, Om0=0.315, H0=67.7, zmin=0.5,
                 zmax=7.0, ninterp=100):
        """ Initializer

        Parameters
        ----------
        Om0: float
          Matter density parameter

        H0: float
          Hubble constant, in km / s / Mpc

        zmin: float
          Minimum redshift supported.

        zmax: float
          Maximum redshift supported.

        ninterp: int
          Number of interpolation points for luminosity distance.

        """

        from astropy.cosmology import FlatLambdaCDM
        from astropy.units import Quantity
        import astropy.io.fits as fits
        from pkg_resources import resource_filename
        from scipy.interpolate import RectBivariateSpline as rbs
        from scipy.interpolate import interp1d

        self._zmin = float(zmin)
        self._zmax = float(zmax)
        self._Om0 = float(Om0)
        self._H0 = float(H0)
        self._ninterp = int(ninterp)

        if self._zmin == self._zmax:
            raise ValueError("No range between zmin and zmax")
        if self._zmin > self._zmax:
            self._zmin, self._zmax = self._zmax, self._zmin
        if self._zmin < 0.0:
            raise ValueError("zmin must be >= 0: {:f}".format(self._zmin))
        if self._Om0 <= 0.0:
            raise ValueError("Om0 must be positive: {:f}".format(self._Om0))
        if self._H0 <= 0.0:
            raise ValueError("H0 must be positive: {:f}".format(self._H0))
        if self._ninterp <= 0:
            raise ValueError("Ninterp must be > 0: {:d}".format(self._ninterp))

        # Set up luminosity distance interpolant.  We actually
        # interpolate log((1+z) / (4 pi d_L^2)) in log(1+z)
        cos = FlatLambdaCDM(H0=self._H0, Om0=self._Om0, Tcmb0=0.0, Neff=0.0)
        zrange = np.linspace(self._zmin, self._zmax, self._ninterp)
        mpc_in_cm = 3.0857e24
        prefac = 1.0 / (4 * math.pi * mpc_in_cm**2)
        lumdist = cos.luminosity_distance(zrange)
        if isinstance(lumdist, Quantity):
            lumdist = lumdist.value
        dlval = prefac * (1.0 + zrange) / lumdist**2
        self._dlfac = interp1d(np.log(1 + zrange), np.log(dlval))

        # Read in the data products, and set up interpolations on them
        sb_tpl = resource_filename(__name__, 'resources/SED_sb.fits')
        hdu = fits.open(sb_tpl)
        dat = hdu['SEDS'].data
        hdu.close()
        self._sblam = dat['LAMBDA'][0]
        self._sbumean = dat['UMEAN'][0]
        arg = np.argsort(self._sbumean)
        self._sbumean = self._sbumean[arg]
        self._sbrange = np.array([self._sbumean[0], self._sbumean[-1]])
        self._sbseds = dat['SEDS'][0, :, :].transpose()[arg, :]
        self._sbinterp = rbs(self._sbumean, self._sblam, self._sbseds,
                             kx=1, ky=1)

        ms_tpl = resource_filename(__name__, 'resources/SED_ms.fits')
        hdu = fits.open(ms_tpl)
        dat = hdu['SEDS'].data
        hdu.close()
        self._mslam = dat['LAMBDA'][0]
        self._msumean = dat['UMEAN'][0]
        arg = np.argsort(self._msumean)
        self._msumean = self._msumean[arg]
        self._msrange = np.array([self._msumean[0], self._msumean[-1]])
        self._msseds = dat['SEDS'][0, :, :].transpose()[arg, :]
        self._msinterp = []
        self._msinterp = rbs(self._msumean, self._mslam, self._msseds,
                             kx=1, ky=1)
Code Example #28
 def get_st_h(self, lgp, lgt):
     return rbs(self.logpvals, self.logtvals, self.logs,
                **self.spline_kwargs)(lgp, lgt, dy=1, grid=False)
Code Example #29
 def get_logrho_h(self, lgp, lgt):
     return rbs(self.logpvals, self.logtvals, self.logrho,
                **self.spline_kwargs)(lgp, lgt, grid=False)
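
The chabrier.py getters above evaluate the spline at paired (lgp, lgt) points with grid=False, and some also request a partial derivative via dx/dy. A small sketch of both calls on an invented table:

import numpy as np
from scipy.interpolate import RectBivariateSpline as rbs

logp = np.linspace(5.0, 12.0, 40)                  # hypothetical log-pressure grid
logt = np.linspace(2.0, 5.0, 30)                   # hypothetical log-temperature grid
logrho = np.add.outer(0.3 * logp, -0.5 * logt)     # stand-in for a tabulated quantity

spline = rbs(logp, logt, logrho)

pts_p = np.array([6.1, 8.4, 11.0])
pts_t = np.array([2.5, 3.3, 4.8])
vals = spline(pts_p, pts_t, grid=False)            # one value per (p, t) pair, not an outer grid
dvals_dt = spline(pts_p, pts_t, dy=1, grid=False)  # partial derivative along the second axis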
Code Example #30
def RayTracingPropagatorParax2V(x, y, z, Dx, Dy, Dz, zinit, zend, RayMatrix,
                                n):
    """
    This function traces the rays of class 'photon' contained in RayMatrix
    propagating in a medium whose refractive index is given by the matrix n.
    The rays propagate in the paraxial approximation along the z axis.
    
    NB z is the light propagation axis
    
    x, y, z are the coordinates of the matrix n along which the light is propagating
    zinit is the integration start
    zend is the integration end
    RayMatrix is the matrix of ray elements of class 'photon' described in Classes.py
    n is the refractive index matrix  
    THIS VERSION USES SPLINE INTERPOLATION TO EVALUATE THE DERIVATIVES
    """
    import numpy as np
    from scipy.interpolate import RectBivariateSpline as rbs
    from scipy.integrate import odeint  # needed for the ray ODE integration below

    init = np.argmin(np.abs(z - zinit))
    stop = np.argmin(np.abs(z - zend))

    for i in range(len(RayMatrix)):
        for j in range(len(RayMatrix[0])):
            print('Ray Tracing')
            print(((len(RayMatrix) - i) / len(RayMatrix)) * 100, '%')
            print('\n')

            for zcount in range(init, stop):
                ycount = np.argmin(np.abs(RayMatrix[i][j].y[zcount] - y))  # index of nearest y grid point
                ni = rbs(x, y, n[:, :, zcount])
                niz = rbs(x, z, n[:, ycount, :])
                x0 = [RayMatrix[i][j].x[zcount],
                      RayMatrix[i][j].dx[zcount]]  #initial conditions x and x'
                y0 = [RayMatrix[i][j].y[zcount],
                      RayMatrix[i][j].dy[zcount]]  #initial conditions y and y'

                dndx_local = ni(x0[0], y0[0], dx=1)
                dndy_local = ni(x0[0], y0[0], dy=1)
                dndz_local = niz(x0[0], z[zcount], dy=1)
                n_local = ni(x0[0], y0[0])

                xsol = odeint(Fx,
                              x0,
                              z[range(zcount, zcount + 2)],
                              args=(dndz_local / n_local,
                                    -dndx_local / n_local),
                              mxstep=5000000)  #,mxstep=5000000
                ysol = odeint(Fy,
                              y0,
                              z[range(zcount, zcount + 2)],
                              args=(dndz_local / n_local,
                                    -dndy_local / n_local),
                              mxstep=5000000)

                #                if dndx_local!=0:
                #                    print(dndx_local, dndy_local, dndz_local, n_local, xsol[1,:], ysol[1,:], z[zcount])

                RayMatrix[i][j].x[zcount + 1] = xsol[1, 0]
                RayMatrix[i][j].y[zcount + 1] = ysol[1, 0]
                RayMatrix[i][j].z[zcount + 1] = z[zcount + 1]
                RayMatrix[i][j].dx[zcount + 1] = xsol[1, 1]
                RayMatrix[i][j].dy[zcount + 1] = ysol[1, 1]

    return RayMatrix