Example #1
    def test_linear_edges(self):
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        wanted = np.asarray([0., 1111.])
        assert_array_almost_equal(interp(sample), wanted)
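
The helper self._get_sample_4d is not shown here. A minimal sketch of a grid consistent with the asserted corner values (0. and 1111.), assuming the common convention of encoding each coordinate in one decimal digit:

import numpy as np

def get_sample_4d():
    # hypothetical stand-in for self._get_sample_4d(): a regular 4-D grid
    # whose value at (x, y, z, w) is 1000*x + 100*y + 10*z + w
    points = [np.linspace(0., 1., 3)] * 4
    xg, yg, zg, wg = np.meshgrid(*points, indexing='ij')
    values = 1000 * xg + 100 * yg + 10 * zg + wg
    return points, values
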
def Plot_Ewald_triclinic(D, wavelength_angstroms, ucell, factor=3.1, format=True, **kwargs):  # pass full 3-D data (SF) and wavelength in angstroms

    PLOT_RAD_NEW(D, wavelength_angstroms, ucell, factor=factor, format=format, **kwargs)
    exit()  # early exit: everything below never runs; it also relies on
    # apparent module-level globals (path, NBINSRAD, units, contours, DPI, PLOT_EWALDS)

    if not os.path.exists(path):
        os.makedirs(path)

    X = D[:, 0, 0, 0].copy()
    Y = D[0, :, 0, 1].copy()
    Z = D[0, 0, :, 2].copy()

    NBINSZ = 1 * D[0, 0, :, 2].size
    ZBNS = np.linspace(Z[0], Z[-1], NBINSZ)

    if NBINSRAD > 0:
        XBNSRD = np.linspace(-NBINSRAD, NBINSRAD, num=NBINSRAD*2)
        XBNSRD = np.sqrt(np.abs(XBNSRD))*np.sign(XBNSRD)
        XBNSRD *= (X[-1]/XBNSRD[-1])
    else:
        XBNSRD = X
        print("setting XBNSRD=", X)

    dx1 = X[1 + int(X.shape[0]/2)] - X[int(X.shape[0]/2)]

    SF = D[:, :, :, 3]

    a1 = ucell[0]
    a2 = ucell[1]
    a3 = ucell[2]

    b1 = old_div((np.cross(a2, a3)), (np.dot(a1, np.cross(a2, a3))))
    b2 = old_div((np.cross(a3, a1)), (np.dot(a2, np.cross(a3, a1))))
    b3 = old_div((np.cross(a1, a2)), (np.dot(a3, np.cross(a1, a2))))

    Dnew = np.zeros_like(D)

    for ix in trange(D.shape[0]):
        Dnew[ix, :, :, 0:3] += X[ix]*b1
    for iy in trange(D.shape[1]):
        Dnew[:, iy, :, 0:3] += Y[iy]*b2
    for iz in trange(D.shape[2]):
        Dnew[:, :, iz, 0:3] += Z[iz]*b3

    D[..., :3] = Dnew[..., :3]

    K_ES = 2.0*math.pi/wavelength_angstroms  # calculate k for incident xrays in inverse angstroms

    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RegularGridInterpolator.html#scipy.interpolate.RegularGridInterpolator
    # Notes from the scipy docs: contrary to LinearNDInterpolator and
    # NearestNDInterpolator, the RegularGridInterpolator class avoids the
    # expensive triangulation of the input data by taking advantage of the
    # regular grid structure. That triangulation is why the ND-interpolator
    # fallback used below for non-orthorhombic cells is so slow.

    XGD = D[:, :, :, 0]  # X spatial grid view
    YGD = D[:, :, :, 1]
    ZGD = D[:, :, :, 2]
    VGD = D[:, :, :, 3]

    DC = D[:, :, :, 0:3]

    DR = DC.reshape(DC.size // 3, 3)  # integer division: reshape needs an int

    # check if fast interpolation can be used
    lbuf = True
    for i in range(3):
        for j in range(i + 1, 3):
            if ucell[i, j] != 0 or ucell[j, i] != 0:
                lbuf = False

    print("Interpolating grid...")

    if ucell[0, 0] == ucell[1, 1] and ucell[0, 0] == ucell[2, 2] and lbuf:

        print("using fast interpolation for orthorhombic cell")
        ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)

    else:

        print("Interpolating non-orthorhombic cell")
        dtime = 480.0 * XGD.size / (98 * 98 * 99)  # empirical time estimate

        print("interpolation time estimate: ", round(dtime / 60, 1), " minutes, finishing around ", (
                    datetime.datetime.now() + datetime.timedelta(seconds=dtime)).strftime('%I:%M %p'))

        start = time.time()
        coords = list(zip(XGD.ravel(), YGD.ravel(), ZGD.ravel()))

        if False:  # flip to True for (much slower) triangulation-based linear interpolation
            ES = LinearNDInterpolator(coords, VGD.ravel())
        else:
            ES = NearestNDInterpolator(coords, VGD.ravel())
        end = time.time()

        print("interpolation finished, taking %4.2f seconds" % (end-start))

    xyzpts = []  # a list, so the (disabled) loop below could append to it
    print("setting up points for radial integration")
    Scale = 1

    if False:  # dead branch: xyzpts from either path below is superseded by the reshape that follows
        for ix in trange(D.shape[0]):
            for iy in range(D.shape[1]):
                for iz in range(D.shape[2]):
                    xyzpts.append((D[ix, iy, iz, 0], D[ix, iy, iz, 1], D[ix, iy, iz, 2]))
    else:
        XPTS = np.linspace(D[0, 0, 0, 0], D[-1, 0, 0, 0], Scale * D.shape[0], dtype=np.float16)
        YPTS = np.linspace(D[0, 0, 0, 1], D[0, -1, 0, 1], Scale * D.shape[1], dtype=np.float16)
        ZPTS = np.linspace(D[0, 0, 0, 2], D[0, 0, -1, 2], Scale * D.shape[2], dtype=np.float16)
        print("mesh")
        xyzpts = np.meshgrid(XPTS, YPTS, ZPTS)
        print("stack")
        xyzpts = np.stack(xyzpts, -1).reshape(-1, 3)
        print("done")

    xyzpts = np.reshape(D[:, :, :, :3], (D.shape[0]*D.shape[1]*D.shape[2], 3))  # 5000x faster than above loop

    NSP = 20
    NSP = np.minimum(NSP, xyzpts.shape[0])  # split into at most 20 chunks before processing to limit memory usage

    xyzpieces = np.array_split(xyzpts, NSP)
    EWDxyz = np.asarray([])
    print("interpolating")
    for i in tqdm.tqdm(xyzpieces):
        buf = ES(i)
        EWDxyz = np.append(EWDxyz, buf, axis=0)

    print("EWD done")

    rpts = np.sqrt(xyzpts[:, 0]**2.0 + xyzpts[:, 1]**2.0)

    Hcount, XEC, YEC = np.histogram2d(rpts, xyzpts[:, 2], bins=(XBNSRD, ZBNS))

    Hval, XEV, YEV = np.histogram2d(rpts, xyzpts[:, 2], weights=EWDxyz, density=False, bins=(XBNSRD, ZBNS))

    switch1 = True

    if switch1:
        Hcount = np.where(Hcount == 0, 1, Hcount)

    Hrz = Hval / Hcount

    if not switch1:
        Hrz = np.ma.masked_invalid(Hrz)

    S1 = np.sum(Hrz)
    S3 = np.sum(Hrz[Hrz.shape[0] // 2, :])  # integer index required

    Condition1 = False  # need to figure this out: when should this be true?

    if Condition1:
        for ir in range(1, Hrz.shape[0] // 2 - 1):
            # mirror the +r half onto the -r half; this needs to be tested for
            # both even and odd numbers of bins
            Hrz[-ir + Hrz.shape[0] // 2, :] = Hrz[ir + Hrz.shape[0] // 2, :]
    else:
        for ir in range(1, Hrz.shape[0] // 2 - 1):
            Hrz[-ir + 2 + Hrz.shape[0] // 2, :] = Hrz[ir + Hrz.shape[0] // 2, :]


    S2 = np.sum(Hrz)

    XMG, YMG = np.meshgrid(XEV, YEV)

    plt.pcolormesh(XMG[:-1, :], YMG[:-1, :], np.log10(Hrz.T), vmin=np.amin(np.log10(Hrz)), vmax=np.amax(np.log10(Hrz)))
    plt.savefig(path+"_log_rzplot"+format, dpi=DPI)
    plt.clf()
    print("_log_rzplot saved")

    mn = np.amin(Hrz[np.nonzero(Hrz)])
    Hbuf = np.where(Hrz > 0.0, Hrz, mn)
    Log_HRZ = np.log10(Hbuf)

    plt.pcolormesh(XMG[:-1, :] - dx1 / 2.0, YMG[:-1, :], Log_HRZ.T, vmin=np.amin(Log_HRZ), vmax=np.amax(Log_HRZ),
                   cmap='nipy_spectral')
    plt.colorbar()
    plt.savefig(path + "_log_rzplot" + format, dpi=DPI)  # note: same filename as the figure saved above
    plt.clf()

    Nx = D.shape[0]
    Ny = D.shape[1]
    Nz = D.shape[2]

    #==============flat and Ewald-corrected plots=================

    xypts = []
    xyflat = []
    for ix in range(D.shape[0]):
        for iy in range(D.shape[1]):
            xp = D[ix, iy, int(Nz/2), 0]
            yp = D[ix, iy, int(Nz/2), 1]

            theta = np.arctan(np.sqrt(xp**2.0 + yp**2.0)/K_ES)
            xypts.append((xp*np.cos(theta), yp*np.cos(theta), K_ES*(1.0 - np.cos(theta))))
            xyflat.append((xp, yp, 0.0))

    xzpts = []
    xzflat = []

    for ix in range(D.shape[0]):
        for iz in range(D.shape[2]):
            xp = D[ix, int(Ny/2), iz, 0]
            zp = D[ix, int(Ny/2), iz, 2]
            theta = np.arctan(np.sqrt(xp**2.0 + zp**2.0)/K_ES)  # was yp: a copy-paste leftover from the xy loop
            xzpts.append((xp*np.cos(theta), K_ES*(1.0-np.cos(theta)), zp*np.cos(theta)))
            xzflat.append((xp, 0.0, zp))

    yzpts = []
    yzflat = []
    for iy in range(D.shape[1]):
        for iz in range(D.shape[2]):
            yp = D[int(Nz/2), iy, iz, 1]
            zp = D[int(Nz/2), iy, iz, 2]
            theta = np.arctan(np.sqrt(yp**2.0 + zp**2.0)/K_ES)
            yzpts.append((K_ES*(1.0-np.cos(theta)), yp*np.cos(theta), zp*np.cos(theta)))
            yzflat.append((0.0, yp, zp))

    xypts = np.asarray(xypts)
    xzpts = np.asarray(xzpts)
    yzpts = np.asarray(yzpts)

    xyflat = np.asarray(xyflat)
    xzflat = np.asarray(xzflat)
    yzflat = np.asarray(yzflat)

    EWDxy = ES(xypts)
    EWDxz = ES(xzpts)
    EWDyz = ES(yzpts)

    EWDxyflat = ES(xyflat)
    EWDxzflat = ES(xzflat)
    EWDyzflat = ES(yzflat)

    EWDxy = EWDxy.reshape(D.shape[0], D.shape[1])
    EWDxz = EWDxz.reshape(D.shape[0], D.shape[2])
    EWDyz = EWDyz.reshape(D.shape[1], D.shape[2])

    EWDxyflat = EWDxyflat.reshape(D.shape[0], D.shape[1])
    EWDxzflat = EWDxzflat.reshape(D.shape[0], D.shape[2])
    EWDyzflat = EWDyzflat.reshape(D.shape[1], D.shape[2])

    title = ("Ewald Corrected Structure Factor\n" + r" $\lambda=$" + str(wavelength_angstroms)
             + r" $\AA$   $k_{ew}=$" + str(round(K_ES, 2)) + r" $\AA^{-1}$")
    ltitle = 'log ' + title

    xlab = 'x ('+units + ")"
    ylab = 'y ('+units + ")"
    zlab = 'z ('+units + ")"

    fname = "Ewald_"

    iz = 0
    plt.suptitle(title)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.contourf(D[:, :, iz, 0], D[:, :, iz, 1], EWDxy, contours, **kwargs)
    plt.savefig(path + fname + "xy" + str(iz) + format, dpi=DPI)
    plt.clf()

    lax = ['x', 'y', 'z']

    ewlab = "Ewald"
    flab = "Flat"

    iax1 = 0
    iax2 = 1

    EWDxy = np.ma.masked_invalid(EWDxy)
    EWDxyflat = np.ma.masked_invalid(EWDxyflat)

    EWDxz = np.ma.masked_invalid(EWDxz)
    EWDxzflat = np.ma.masked_invalid(EWDxzflat)

    EWDyz = np.ma.masked_invalid(EWDyz)
    EWDyzflat = np.ma.masked_invalid(EWDyzflat)

    if PLOT_EWALDS:
        csplot_wlog(D[:, :, int(Nz / 2) + 1, iax1], D[:, :, int(Nz / 2) + 1, iax2], EWDxy, contours, ewlab, lax[iax1],
                    lax[iax2], **kwargs)

    csplot_wlog(D[:, :, int(Nz / 2) + 1, iax1], D[:, :, int(Nz / 2) + 1, iax2], EWDxyflat, contours, flab, lax[iax1],
                lax[iax2], **kwargs)

    iax1 = 0
    iax2 = 2
    if PLOT_EWALDS:
        csplot_wlog(D[:, int(Ny / 2), :, iax1], D[:, int(Ny / 2), :, iax2], EWDxz, contours, ewlab, lax[iax1],
                    lax[iax2], **kwargs)

    csplot_wlog(D[:, int(Ny / 2), :, iax1], D[:, int(Ny / 2), :, iax2], EWDxzflat, contours, flab, lax[iax1],
                lax[iax2], **kwargs)

    iax1 = 1
    iax2 = 2
    if PLOT_EWALDS:
        csplot_wlog(D[int(Nx / 2), :, :, iax1], D[int(Nx / 2), :, :, iax2], EWDyz, contours, ewlab, lax[iax1],
                    lax[iax2], **kwargs)

    csplot_wlog(D[int(Nx / 2), :, :, iax1], D[int(Nx / 2), :, :, iax2], EWDyzflat, contours, flab, lax[iax1],
                lax[iax2], **kwargs)
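
The orthorhombic/non-orthorhombic branch above exists because RegularGridInterpolator only applies when the grid axes stay separable, which the reciprocal-lattice transform preserves for orthorhombic cells. A self-contained sketch of the speed gap it works around (illustrative sizes, not the function's own data):

import time
import numpy as np
from scipy.interpolate import RegularGridInterpolator, NearestNDInterpolator

x = y = z = np.linspace(-1, 1, 30)
vals = np.random.rand(30, 30, 30)
pts = np.random.uniform(-1, 1, (10000, 3))

t0 = time.time()
fast = RegularGridInterpolator((x, y, z), vals, bounds_error=False)(pts)
t1 = time.time()
xg, yg, zg = np.meshgrid(x, y, z, indexing='ij')
scattered = np.column_stack((xg.ravel(), yg.ravel(), zg.ravel()))
slow = NearestNDInterpolator(scattered, vals.ravel())(pts)
t2 = time.time()
print("regular grid: %.3f s, scattered: %.3f s" % (t1 - t0, t2 - t1))
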
def angle_average(X, Y, Z, SF, ucell=None):

    ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)

    THETA_BINS_PER_INV_ANG = 20.
    MIN_THETA_BINS = 10  # minimum allowed bins
    RBINS = 100

    if ucell is not None:

        a1 = ucell[0]
        a2 = ucell[1]
        a3 = ucell[2]

        b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
        b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))
        b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))

        b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))

    ZBINS = Z.shape[0]  # 400

    XR = (X[-1] - X[0])
    YR = (Y[-1] - Y[0])

    Rmax = min(XR, YR) / 2.0
    Rmax *= 0.95

    rarr, rspace = np.linspace(0.0, Rmax, RBINS, retstep=True)
    zar = np.linspace(Z[0], Z[-1], ZBINS)

    oa = np.zeros((rarr.shape[0], zar.shape[0]))
    circ = 2.*np.pi*rarr  # circumference

    for ir in range(rarr.shape[0]):

        NTHETABINS = max(int(THETA_BINS_PER_INV_ANG*circ[ir]), MIN_THETA_BINS)  #calculate number of bins at this r
        thetas = np.linspace(0.0, np.pi*2.0, NTHETABINS, endpoint=False)  # generate theta array

        t, r, z = np.meshgrid(thetas, rarr[ir], zar)  # generate grid of cylindrical points

        xar = r*np.cos(t)  # set up x,y coords
        yar = r*np.sin(t)

        pts = np.vstack((xar.ravel(), yar.ravel(), z.ravel())).T  # reshape for interpolation

        if ucell is not None:
            # pts = mc_inv(pts, ucell)
            pts = np.matmul(pts, b_inv)

        oa[ir, :] = np.average(ES(pts).reshape(r.shape), axis=1)  # store average values in final array

    mn = np.nanmin(oa)
    oa = np.where(np.isnan(oa), mn, oa)

    rad_avg = np.average(oa)  # global average, used to normalize below
    oa /= rad_avg  # normalize

    # set up data for contourf plot by making it symmetrical
    final = np.append(oa[::-1, :], oa[1:], axis=0)  # SF
    rfin = np.append(-rarr[::-1], rarr[1:])  # R
    zfin = np.append(z[:, 0, :], z[1:, 0, :], axis=0)  # Z

    return final, rfin, zfin
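
A short usage sketch for angle_average on a toy orthorhombic grid (the inputs below are illustrative, not from the original module):

import numpy as np

X = np.linspace(-2, 2, 40)
Y = np.linspace(-2, 2, 40)
Z = np.linspace(-2, 2, 40)
xg, yg, zg = np.meshgrid(X, Y, Z, indexing='ij')
SF = np.exp(-(xg**2 + yg**2 + zg**2))  # toy structure factor

final, rfin, zfin = angle_average(X, Y, Z, SF)
# final is the cylindrically averaged SF mirrored about r = 0
print(final.shape, rfin.shape)
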
Example #4
    def build(self):
        """Builds the isotropic mesh size function according
            to the user arguments that were passed.

        Usage
        -------
        >>> obj = obj.build()


        Parameters
        -------
            MeshSizeFunction object

        Returns
        -------
            SeismicMesh.MeshSizeFunction object with specific fields populated:
                self.fh: lambda function w/ scipy.interpolate.RegularGridInterpolator representing isotropic mesh sizes in domain
                self.fd: lambda function representing the signed distance function of domain

        """
        _bbox = self.bbox
        width = max(_bbox)
        _vp, _nz, _nx = self.__ReadVelocityModel()

        self.vp = _vp
        self.nz = _nz
        self.nx = _nx

        _hmax = self.hmax
        _hmin = self.hmin
        _grade = self.grade

        _wl = self.wl
        _freq = self.freq

        _dt = self.dt
        _cr_max = self.cr_max

        hh_m = np.zeros(shape=(_nz, _nx)) + _hmin
        if _wl > 0:
            print(
                "Mesh sizes with be built to resolve an estimate of wavelength with "
                + str(_wl) + " vertices...")
            hh_m = _vp / (_freq * _wl)
        # enforce min (and optionally max) sizes
        hh_m = np.where(hh_m < _hmin, _hmin, hh_m)
        if _hmax < np.inf:
            print("Enforcing maximum mesh resolution...")
            hh_m = np.where(hh_m > _hmax, _hmax, hh_m)
        # grade the mesh sizes
        if _grade > 0:
            print("Enforcing mesh gradation...")
            hh_m = self.__hj(hh_m, width / _nx, _grade, 10000)
        # adjust based on the CFL limit so cr < cr_max
        if _dt > 0:
            print("Enforcing timestep of " + str(_dt) + " seconds...")
            cr_old = (_vp * _dt) / hh_m
            dxn = (_vp * _dt) / _cr_max
            hh_m = np.where(cr_old > _cr_max, dxn, hh_m)
        # construct an interpolator object to be queried during mesh generation
        z_vec, x_vec = self.__CreateDomainVectors()
        assert np.all(
            hh_m > 0.0), "edge_size_function must be strictly positive."
        interpolant = RegularGridInterpolator((z_vec, x_vec),
                                              hh_m,
                                              bounds_error=False)
        # create a mesh size function interpolant
        self.fh = lambda p: interpolant(p)
        # create a signed distance function
        self.fd = lambda p: self.drectangle(p, _bbox[0], _bbox[1], _bbox[2],
                                            _bbox[3])
        return self
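
A hedged usage sketch for this build workflow; the constructor arguments below follow early SeismicMesh examples and may differ between versions, and the velocity-model path is a placeholder:

import numpy as np
from SeismicMesh import MeshSizeFunction  # assumed import path

ef = MeshSizeFunction(bbox=(-10e3, 0.0, 0.0, 10e3), model="velocity.segy",
                      hmin=50.0, wl=5, freq=2.0)  # illustrative arguments
ef = ef.build()
pts = np.array([[-5000.0, 5000.0]])  # (z, x) query points
print(ef.fh(pts))  # isotropic mesh size at the points
print(ef.fd(pts))  # signed distance; negative means inside the domain
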
Example #5
def linear_bessel_array(wave=0.488,
                        NA_inner=0.44,
                        NA_outer=0.55,
                        spacing=None,
                        n_beam='fill',
                        crop=0.22,
                        tilt=0,
                        shift_x=0,
                        shift_y=0,
                        mag=167.364,
                        pixel=13.662,
                        slm_xpix=1280,
                        slm_ypix=1024,
                        fillchip=0.95,
                        fudge=0.95,
                        show=False,
                        outdir=None,
                        pattern_only=True):

    # auto-choose good spacing
    if not spacing:
        spacing = fudge * wave / NA_inner

    #  to fill the chip
    if n_beam == 'fill' and fillchip:
        n_beam = int(
            np.floor(1 + ((fillchip * (slm_xpix *
                                       (pixel / mag) / 2)) / spacing)))

    # expand cropping for single bessel
    # if n_beam == 1:
    #    crop = min((.0291, crop))

    # Populate real space array
    dx = pixel / mag
    x = np.arange(-(slm_xpix) / 2, (slm_xpix + 1) / 2, 1.0) * dx
    y = x
    # for scipy interpolation functions, we don't use the meshgrid...
    # [x, y] = np.meshgrid(x, y)
    # x_slm = linspace(x[0, 0], x[-1, -1], slm_xpix)
    x_slm = np.linspace(x[0], x[-1], slm_xpix)
    y_slm = x_slm
    # [x_slm, y_slm] = np.meshgrid(x_slm, y_slm)

    # Populate k space array
    dk = 2 * np.pi / (slm_xpix + 1) / dx
    kx = np.arange(-(slm_xpix) / 2, (slm_xpix + 1) / 2, 1.0) * dk
    ky = kx
    [kx, ky] = np.meshgrid(kx, ky)
    kr = np.sqrt(kx * kx + ky * ky)

    # Mask k-space array according to inner and outer NA
    pupil_mask = (kr < NA_outer * (2 * np.pi / wave)) & (kr > NA_inner *
                                                         (2 * np.pi / wave))

    # Generate array of bessel beams by applying phase ramps in k-space
    pupil_field_ideal = pupil_mask.astype(np.complex128)

    f = kx * spacing * np.cos(tilt) + ky * spacing * np.sin(tilt)

    if getattr(sys, 'frozen', False):
        # NOTE: both branches currently define the same jit-compiled function;
        # the frozen/non-frozen split presumably anticipated different behavior
        @jit(nopython=True)
        def calc(v, ii):
            A = np.exp(1j * f * ii) + np.exp(-1j * f * ii)
            return v + np.multiply(pupil_mask, A)
    else:

        @jit(nopython=True)
        def calc(v, ii):
            A = np.exp(1j * f * ii) + np.exp(-1j * f * ii)
            return v + np.multiply(pupil_mask, A)

    for ii in range(1, n_beam):
        # A = np.exp(1j * f * ii)
        # B = np.exp(-1j * f * ii)
        # pupil_field_ideal += pupil_mask * (A+B)
        pupil_field_ideal = calc(pupil_field_ideal, ii)
    pupil_field_ideal *= np.exp(1j * (kx * shift_x + ky * shift_y))

    # Ideal SLM field of fourier transform of pupil field
    slm_field_ideal = fftshift(fft2(ifftshift(pupil_field_ideal))).real
    slm_field_ideal /= np.max(np.max(np.abs(slm_field_ideal)))

    # Display ideal intensity at sample (incorporates supersampling)
    if show:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.imshow(np.abs(slm_field_ideal * slm_field_ideal))
        plt.title('Ideal coherent bessel light sheet intensity')
        plt.axis('image')

    # Interpolate back onto SLM pixels and apply cropping factor
    # interpolator = interp2d(x, x, slm_field_ideal)
    interpolator = RectBivariateSpline(x, y, slm_field_ideal)
    slm_pattern = interpolator(x_slm, y_slm)
    # slm_pattern *= np.abs(slm_pattern) > crop
    slm_pattern[np.abs(slm_pattern) < crop] = 0
    eps = np.finfo(float).eps
    slm_pattern = np.sign(slm_pattern + eps) * np.pi / 2 + np.pi / 2

    # Account for rectangular aspect ratio of SLM and convert phase to binary
    low = int(np.floor((slm_xpix / 2) - (slm_ypix / 2) - 1))
    high = int(low + slm_ypix)
    slm_pattern_final = (slm_pattern[low:high, :] / np.pi) != 0

    if outdir is not None:
        outdir = os.path.abspath(os.path.expanduser(outdir))
        if os.path.isdir(outdir):
            namefmt = '{:.0f}_{:2d}b_s{:.2f}_c{:.2f}_na{:.0f}-{:.0f}_x{:02f}_y{:02f}_t{:0.3f}'
            name = namefmt.format(wave * 1000, n_beam * 2 - 1, spacing, crop,
                                  100 * NA_outer, 100 * NA_inner, shift_x,
                                  shift_y, tilt)
            name = name.replace('.', 'p')
            outpath = os.path.join(outdir, name + '.png')

            imout = Image.fromarray(slm_pattern_final.astype(np.uint8) * 255)
            imout = imout.convert('1')
            imout.save(outpath)

    if show:
        plt.figure()
        plt.imshow(slm_pattern, interpolation='nearest')
        plt.title(
            'Cropped and pixelated phase from SLM pattern exiting the polarizing beam splitter'
        )
        plt.axis('image')

        plt.figure()
        plt.imshow(slm_pattern_final, interpolation='nearest', cmap='gray')
        plt.title('Binarized image to output to SLM')

    if pattern_only:
        if show:
            plt.show()
        return slm_pattern_final

    # THIS SHOULD GO INTO SEPARATE FUNCTION

    # Convert SLM pattern to phase modulation
    # Interpolate back so that there is odd number of pixels for FFT calculation (want center at 0)

    # this method uses nearest neighbor like the matlab version
    [xmesh, ymesh] = np.meshgrid(x, y)
    coords = np.array([xmesh.flatten(), ymesh.flatten()]).T
    interpolator = RegularGridInterpolator((x_slm, y_slm),
                                           slm_pattern,
                                           method='nearest')
    slm_pattern_cal = interpolator(coords)  # supposed to be nearest neighbor
    slm_pattern_cal = slm_pattern_cal.reshape(len(x), len(y)).T
    slm_field = np.exp(1j * slm_pattern_cal)

    # at this point, matlab has complex component = 0.0i

    # Compute intensity impinging on annular mask
    pupil_field_impinging = fftshift(fft2(ifftshift(slm_field)))
    # Compute intensity passing through annular mask
    pupil_field = pupil_field_impinging * pupil_mask

    if show:
        plt.figure()
        ax1 = plt.subplot(1, 2, 1)
        plt.imshow(
            (pupil_field_impinging * np.conj(pupil_field_impinging)).real,
            interpolation='nearest',
            cmap='inferno')
        plt.clim(0, (2 * n_beam - 1) * 3e6)
        plt.title('Intensity impinging on annular mask')
        plt.subplot(1, 2, 2, sharex=ax1)
        plt.imshow((pupil_field * np.conj(pupil_field)).real,
                   interpolation='nearest',
                   cmap='inferno')
        plt.clim(0, (2 * n_beam - 1) * 3e6)
        plt.title('Intensity after annular mask')

    # Compute intensity at sample
    field_final = fftshift(fft2(ifftshift(pupil_field)))
    intensity_final = (field_final * np.conj(field_final)).real

    if show:
        plt.figure()
        plt.imshow(intensity_final, interpolation='nearest')
        plt.title('Actual intensity at sample')
        plt.axis('image')

        plt.show()

    pupil_field = np.real(pupil_field * np.conj(pupil_field))
    return slm_pattern_final, intensity_final, pupil_field
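
A quick call sketch, assuming the module's dependencies (numpy, scipy, numba, PIL) are importable; the arguments echo the signature's defaults:

pattern = linear_bessel_array(wave=0.488, NA_inner=0.44, NA_outer=0.55,
                              pattern_only=True, show=False)
print(pattern.shape, pattern.dtype)  # (slm_ypix, slm_xpix) binary SLM bitmap
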
def read_hrncst_smoothed(lons_kanto, lats_kanto):
    '''
    Read high resolution nowcast data and output in a smoothed grid

    '''
    # take min-max range with some margin
    delta = 1.0
    lon_min = np.min(lons_kanto) - delta
    lon_max = np.max(lons_kanto) + delta
    lat_min = np.min(lats_kanto) - delta
    lat_max = np.max(lats_kanto) + delta

    nc = netCDF4.Dataset(
        '../data/work/jma_hrncst/4p-hrncstprate_japan0250_2017-08-01_1100utc.nc',
        'r')
    # dimensions
    nx = len(nc.dimensions['LON'])
    ny = len(nc.dimensions['LAT'])
    nt = len(nc.dimensions['TIME'])
    print("dims:", nx, ny, nt)
    # extract variable
    lons = np.array(nc.variables['LON'][:])
    lats = np.array(nc.variables['LAT'][:])
    R = nc.variables['PRATE'][:]  # numpy.ma.core.MaskedArray
    # long_name: precipitation rate
    # units: 1e-3 meter/hour -> [mm/h]
    # scale_factor: 0.01
    # add_offset: 0.0
    id_lons = (lons < lon_max) * (lons > lon_min)
    id_lats = (lats < lat_max) * (lats > lat_min)
    lons_rect = lons[id_lons]
    lats_rect = lats[id_lats]
    # "R[:,id_lats,id_lons]" does not seem to work..
    r_tmp = R[:, id_lats, :]
    r_rect = np.array(r_tmp[:, :, id_lons])
    r_rect = np.maximum(r_rect, 0)  # replace negative value with 0
    # Apply gaussian filter (Smoothing)
    sigma = [2, 2]  # smooth 250m scale to 1km scale
    r_sm = scipy.ndimage.filters.gaussian_filter(r_rect[0, :, :],
                                                 sigma,
                                                 mode='constant')
    plt.imshow(R[0, :, :])
    plt.savefig("alljapan.png")
    # Interpolate onto the target grid (note: the default method here is
    # linear; pass method='nearest' for true nearest-neighbour)
    intfunc = RegularGridInterpolator((lats_rect, lons_rect), r_sm)
    la2, lo2 = np.meshgrid(lats_kanto, lons_kanto)
    pts = np.vstack([la2.flatten(), lo2.flatten()])
    r_interp = intfunc(pts.T)
    r_interp = r_interp.reshape([len(lats_kanto), len(lons_kanto)]).T
    plot_with_map(r_interp, lons_kanto, lats_kanto,
                  "smooth_interp_with_map.png")
    #plot_with_map(R[0,:,:],lons,lats,"alljapan_with_map.png")
    plt.imshow(r_rect[0, :, :])
    plt.savefig("smooth_before.png")
    plot_with_map(r_rect[0, :, :], lons_rect, lats_rect,
                  "smooth_before_with_map.png")
    plt.imshow(r_sm)
    plt.savefig("smooth_after.png")
    plot_with_map(r_sm, lons_rect, lats_rect, "smooth_after_with_map.png")
    import pdb
    pdb.set_trace()  # debugging breakpoint
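
The regridding step above generalizes: build a RegularGridInterpolator on the source (lat, lon) axes, then evaluate it on a meshgrid of the target axes. A self-contained sketch of just that step with dummy data (note the reshape uses la2.shape, respecting meshgrid's default 'xy' ordering):

import numpy as np
from scipy.interpolate import RegularGridInterpolator

lats_src = np.linspace(34.0, 37.0, 120)
lons_src = np.linspace(138.0, 141.0, 120)
field = np.random.rand(120, 120)  # stands in for the smoothed rain rate r_sm

lats_dst = np.linspace(35.0, 36.0, 60)
lons_dst = np.linspace(139.0, 140.0, 50)

intfunc = RegularGridInterpolator((lats_src, lons_src), field)
la2, lo2 = np.meshgrid(lats_dst, lons_dst)
pts = np.vstack([la2.flatten(), lo2.flatten()])
regridded = intfunc(pts.T).reshape(la2.shape)
print(regridded.shape)  # (len(lons_dst), len(lats_dst))
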
Example #7
def make_background_from_wrf(Grid,
                             file_path,
                             wrf_time,
                             radar_loc,
                             vel_field=None):
    """
    This function makes an initialization field based off of the u and v
    from a WRF run. Only u and v are used from the WRF file.
    
    Parameters
    ----------
    Grid: Py-ART Grid object
        This is the Py-ART Grid containing the coordinates for the 
        analysis grid.
    file_path: str
        This is the path to the WRF grid
    wrf_time: datetime
        The timestep to derive the initialization field from.
    radar_loc: tuple
        The (X, Y) location of the radar in the WRF grid. The output
        coordinate system will be centered around this location
        and given the same grid specification that is specified
        in Grid.
    vel_field: str, or None
        This string contains the name of the velocity field in the 
        Grid. None will try to automatically detect this value.
        
    Returns
    -------
    u: 3D ndarray 
        The initialization u field.
    v: 3D ndarray
        The initialization v field.
    w: 3D ndarray
        The initialization w field.
        
    """
    # Parse names of velocity field
    if vel_field is None:
        vel_field = pyart.config.get_field_name('corrected_velocity')

    analysis_grid_shape = Grid.fields[vel_field]['data'].shape
    u = np.ones(analysis_grid_shape)
    v = np.ones(analysis_grid_shape)
    w = np.zeros(analysis_grid_shape)

    # Load WRF grid
    wrf_cdf = Dataset(file_path, mode='r')
    W_wrf = wrf_cdf.variables['W'][:]
    V_wrf = wrf_cdf.variables['V'][:]
    U_wrf = wrf_cdf.variables['U'][:]
    PH_wrf = wrf_cdf.variables['PH'][:]
    PHB_wrf = wrf_cdf.variables['PHB'][:]
    alt_wrf = (PH_wrf + PHB_wrf) / 9.81

    new_grid_x = Grid.point_x['data']
    new_grid_y = Grid.point_y['data']
    new_grid_z = Grid.point_z['data']

    # Find timestep from datetime
    time_wrf = wrf_cdf.variables['Times']
    ntimes = time_wrf.shape[0]
    dts_wrf = []
    for i in range(ntimes):
        x = ''.join([x.decode() for x in time_wrf[i]])
        dts_wrf.append(datetime.strptime(x, '%Y-%m-%d_%H:%M:%S'))

    dts_wrf = np.array(dts_wrf)
    timestep = np.where(dts_wrf == wrf_time)
    if (len(timestep[0]) == 0):
        raise ValueError(("Time " + str(wrf_time) + " not found in WRF file!"))

    x_len = wrf_cdf.__getattribute__('WEST-EAST_GRID_DIMENSION')
    y_len = wrf_cdf.__getattribute__('SOUTH-NORTH_GRID_DIMENSION')
    dx = wrf_cdf.DX
    dy = wrf_cdf.DY
    x = np.arange(0, x_len) * dx - radar_loc[0] * 1e3
    y = np.arange(0, y_len) * dy - radar_loc[1] * 1e3
    z = np.mean(alt_wrf[timestep[0], :, :, :], axis=(0, 2, 3))
    z_stag = (z[1:] + z[:-1]) / 2.0
    x_stag = (x[1:] + x[:-1]) / 2.0
    y_stag = (y[1:] + y[:-1]) / 2.0

    W_wrf = np.squeeze(W_wrf[timestep[0], :, :, :])
    V_wrf = np.squeeze(V_wrf[timestep[0], :, :, :])
    U_wrf = np.squeeze(U_wrf[timestep[0], :, :, :])

    w_interp = RegularGridInterpolator((z, y_stag, x_stag),
                                       W_wrf,
                                       bounds_error=False,
                                       fill_value=0.)
    v_interp = RegularGridInterpolator((z_stag, y, x_stag),
                                       V_wrf,
                                       bounds_error=False,
                                       fill_value=0.)
    u_interp = RegularGridInterpolator((z_stag, y_stag, x),
                                       U_wrf,
                                       bounds_error=False,
                                       fill_value=0.)

    u = u_interp((new_grid_z, new_grid_y, new_grid_x))
    v = v_interp((new_grid_z, new_grid_y, new_grid_x))
    w = np.zeros(u.shape)

    return u, v, w
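
A hedged call sketch, assuming a Py-ART analysis grid and a WRF output file are available; the paths, timestamp and radar location below are placeholders:

import datetime
import pyart

grid = pyart.io.read_grid("radar_grid.nc")  # placeholder path
u0, v0, w0 = make_background_from_wrf(grid, "wrfout_d01.nc",
                                      datetime.datetime(2017, 8, 1, 11, 0),
                                      radar_loc=(150.0, 75.0))
# u0/v0 hold WRF winds interpolated onto the analysis grid; w0 is zero
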
Example #8
    def calculate(self, source, block, phantom, settings):

        # Transform phantom to beam coords
        print("Transforming phantom to beam coords...")
        transform = Transform(source.position, source.rotation)
        phantom_beam = np.zeros_like(phantom.positions)
        _, xlen, ylen, zlen = phantom_beam.shape
        for x in tqdm(range(xlen)):
            for y in range(ylen):
                for z in range(zlen):
                    phantom_beam[:, x, y, z] = transform.global_to_beam(
                        phantom.positions[:, x, y, z], )

        print("Interpolating phantom densities...")
        phantom_densities_interp = RegularGridInterpolator(
            (phantom_beam[0, :, 0, 0], phantom_beam[1, 0, :, 0],
             phantom_beam[2, 0, 0, :]),
            phantom.densities,
            method='nearest',
            bounds_error=False,
            fill_value=0)

        # # Create dose grid (just the same size as the phantom for now)
        # self.dose_grid_positions = np.copy(phantom_beam)
        # self.dose_grid_dim = np.array([1, 1, 1], dtype=np.float64)  # cm

        # Create dose grid
        self.dose_grid_positions = np.mgrid[-20:20:41j, -20:20:41j, -40:0:41j]
        self.dose_grid_dim = np.array([1, 1, 1], dtype=np.float64)  # cm
        _, xlen, ylen, zlen = self.dose_grid_positions.shape
        for x in tqdm(range(xlen)):
            for y in range(ylen):
                for z in range(zlen):
                    self.dose_grid_positions[:, x, y, z] = transform.global_to_beam(
                        self.dose_grid_positions[:, x, y, z])

        # Perform hit testing to find which dose grid voxels are in the beam
        print("Performing hit-testing of dose grid voxels...")
        _, xlen, ylen, zlen = self.dose_grid_positions.shape
        dose_grid_blocked = np.zeros((xlen, ylen, zlen))
        dose_grid_OAD = np.zeros((xlen, ylen, zlen))
        for x in tqdm(range(xlen)):
            for y in range(ylen):
                for z in range(zlen):

                    position = self.dose_grid_positions[:, x, y, z]

                    samples = settings['fluenceResampling']
                    offset = self.dose_grid_dim / 3

                    block_factor = 0
                    for ix in range(samples):
                        for iy in range(samples):
                            for iz in range(samples):
                                position_iso = isocentre_plane_position(
                                    np.array([
                                        position[0] - offset[0] +
                                        offset[0] * ix, position[1] -
                                        offset[1] + offset[1] * iy,
                                        position[2] - offset[2] +
                                        offset[2] * iz
                                    ]), source.SAD)
                                block_factor += block.transmission(
                                    position_iso) / samples**3
                    dose_grid_blocked[x, y, z] = block_factor

                    # Save off-axis distance (at iso plane) for later
                    position_iso = isocentre_plane_position(
                        position, source.SAD)
                    dose_grid_OAD[x, y, z] = (np.sqrt(
                        np.sum(np.power(position_iso, 2))))

        # Calculate effective depths of dose grid voxels
        print("Calculating effective depths of dose grid voxels...")
        dose_grid_d_eff = np.zeros_like(dose_grid_blocked)
        xlen, ylen, zlen = dose_grid_d_eff.shape
        max_z = np.max(self.dose_grid_positions[2, :, :, :])
        print(max_z)
        for x in tqdm(range(xlen)):
            for y in range(ylen):
                for z in range(zlen):
                    if not dose_grid_blocked[x, y, z]:
                        continue
                    voxel = self.dose_grid_positions[:, x, y, z]
                    psi = line_calc_limit_plane_collision(
                        voxel, np.array([0, 0, max_z + 5.0]))
                    dist = np.sqrt(np.sum(np.power(voxel - psi, 2)))
                    num_steps = int(np.floor(dist / settings['stepSize']))  # linspace needs an integer count
                    xcoords = np.linspace(voxel[0], psi[0], num_steps)
                    ycoords = np.linspace(voxel[1], psi[1], num_steps)
                    zcoords = np.linspace(voxel[2], psi[2], num_steps)
                    dose_grid_d_eff[x, y, z] = np.sum(
                        phantom_densities_interp(
                            np.dstack((xcoords, ycoords, zcoords))) *
                        settings['stepSize'])

        # Calculate photon fluence at dose grid voxels
        print("Calculating fluence...")
        # Point source
        self.dose_grid_fluence = np.zeros_like(dose_grid_blocked)
        xlen, ylen, zlen = self.dose_grid_fluence.shape
        self.dose_grid_fluence = (
            settings['sPri'] *
            np.power(-source.SAD / self.dose_grid_positions[2, :, :, :], 2) *
            dose_grid_blocked)
        # Annular source
        r = np.sqrt(
            np.power(self.dose_grid_positions[0, :, :, :], 2) +
            np.power(self.dose_grid_positions[1, :, :, :], 2))
        r_ann = r * settings['zAnn'] / self.dose_grid_positions[2, :, :, :]
        self.dose_grid_fluence += (
            settings['sAnn'] *
            self._in_annulus(r_ann, settings['rInner'], settings['rOuter']) *
            np.power(-source.SAD + settings['zAnn'], 2.0) / np.power(
                self.dose_grid_positions[2, :, :, :] + settings['zAnn'], 2) *
            dose_grid_blocked)
        # Exponential source
        r_exp = r * settings['zExp'] / self.dose_grid_positions[2, :, :, :]
        r_exp[r_exp < 1.0] = 1.0  # Avoid function blowing up near zero
        self.dose_grid_fluence += (
            settings['sExp'] / r_exp * np.exp(-settings['kExp'] * r_exp) *
            np.power(-source.SAD + settings['zExp'], 2.0) / np.power(
                self.dose_grid_positions[2, :, :, :] + settings['zExp'], 2) *
            dose_grid_blocked)

        # Calculate beam softening factor for dose grid voxels
        print("Calculating beam softening factor...")
        f_soften = np.ones_like(dose_grid_OAD)
        f_soften[dose_grid_OAD < settings['softLimit']] = 1 / (
            1 - settings['softRatio'] *
            dose_grid_OAD[dose_grid_OAD < settings['softLimit']])

        # Calculate horn factor for dose grid voxels
        print("Calculating horn factor...")
        f_horn = np.ones_like(dose_grid_OAD)
        f_horn += dose_grid_OAD * settings['hornRatio']

        # Calculate TERMA of dose grid voxels
        print("Calculating TERMA...")
        E = np.linspace(settings['eLow'], settings['eHigh'], settings['eNum'])
        spectrum_weights = source.weights(E)
        mu_w = mu_water(E)
        self.dose_grid_terma = np.zeros_like(dose_grid_blocked)
        xlen, ylen, zlen = self.dose_grid_terma.shape
        for x in tqdm(range(xlen)):
            for y in range(ylen):
                for z in range(zlen):
                    if not dose_grid_blocked[x, y, z]:
                        continue
                    self.dose_grid_terma[x, y, z] = (np.sum(
                        spectrum_weights * self.dose_grid_fluence[x, y, z] *
                        np.exp(-mu_w * f_soften[x, y, z] *
                               dose_grid_d_eff[x, y, z]) * E * mu_w) *
                                                     f_horn[x, y, z])

        # Calculate dose of dose grid voxels
        print("Convolving kernel...")
        kernel = PolyenergeticKernel()
        dose_grid_dose = np.zeros_like(self.dose_grid_terma, dtype=np.float64)
        phis = np.array(sorted(
            [p for p in kernel.cumulative.keys() if p != "radii"]),
                        dtype=np.float64)
        thetas = np.linspace(0, 360, 12, endpoint=False, dtype=np.float64)
        convolve_c(self.dose_grid_terma, dose_grid_dose, self.dose_grid_dim,
                   thetas, phis, kernel)
        self.dose_grid_dose = dose_grid_dose
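
The effective-depth loop above is a ray march: densities are sampled along the line from each voxel toward the source and summed as density * stepSize. A standalone sketch of the same idea on a toy water phantom (names are illustrative):

import numpy as np
from scipy.interpolate import RegularGridInterpolator

ax = np.linspace(-20, 20, 41)
az = np.linspace(-40, 0, 41)
densities = np.ones((41, 41, 41))  # water-equivalent toy phantom
rho = RegularGridInterpolator((ax, ax, az), densities,
                              method='nearest', bounds_error=False, fill_value=0)

step = 0.1
voxel = np.array([0.0, 0.0, -20.0])
entry = np.array([0.0, 0.0, 0.0])  # where the ray enters the phantom
n = int(np.floor(np.linalg.norm(voxel - entry) / step))
ray = np.linspace(entry, voxel, n)  # sample points along the ray
d_eff = np.sum(rho(ray) * step)     # radiological (effective) depth
print(d_eff)  # close to 20 for this water-equivalent path
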
Example #9
    def _gaussian_diffusion(self, diff, dsmooth):
        """
        Gaussian filter operation used to smooth diffusion related deposition thicknesses.

        Args:
            diff: numpy arrays containing the deposition thicknesses.
            dsmooth: smoothing parameter.

        Returns
        -------
        zdepsmth
            numpy array of smoothed deposition thicknesses.

        """

        if self.xgrid is None:
            dx = self.xycoords[1, 0] - self.xycoords[0, 0]
            xmin, xmax = min(self.xycoords[:, 0]), max(self.xycoords[:, 0])
            ymin, ymax = min(self.xycoords[:, 1]), max(self.xycoords[:, 1])
            self.xgrid = numpy.arange(xmin, xmax + dx, dx)
            self.ygrid = numpy.arange(ymin, ymax + dx, dx)
            self.xi, self.yi = numpy.meshgrid(self.xgrid, self.ygrid)

            # Querying the cKDTree later becomes a bottleneck; the original
            # distributed the xyi array across MPI nodes, but with a split
            # count of 1 this effectively runs serially
            xyi = numpy.dstack([self.xi.flatten(), self.yi.flatten()])[0]
            splits = numpy.array_split(xyi, 1)
            split_lengths = numpy.array(list(map(len, splits))) * 3
            localxyi = splits[0]
            query_shape = (xyi.shape[0], 3)

            # Build Tree
            tree = cKDTree(self.xycoords[:, :2])

            # Querying the KDTree is rather slow, so we split it across MPI nodes
            nelems = query_shape[0] * query_shape[1]
            indices = numpy.empty(query_shape, dtype=numpy.int64)
            localdistances, localindices = tree.query(localxyi, k=3)

            self.distances = localdistances
            self.indices = localindices
            self.onIDs = numpy.where(self.distances[:, 0] == 0)[0]

        depZ = numpy.copy(diff)

        if len(depZ[self.indices].shape) == 3:
            zd_vals = depZ[self.indices][:, :, 0]
        else:
            zd_vals = depZ[self.indices]

        with numpy.errstate(divide='ignore'):
            zdi = numpy.average(zd_vals, weights=(1. / self.distances), axis=1)

        if len(self.onIDs) > 0:
            zdi[self.onIDs] = depZ[self.indices[self.onIDs, 0]]

        depzi = numpy.reshape(zdi, (len(self.ygrid), len(self.xgrid)))

        smthDep = gaussian_filter(depzi, sigma=dsmooth)

        rgi_dep = RegularGridInterpolator((self.ygrid, self.xgrid), smthDep)
        zdepsmth = rgi_dep((self.xycoords[:, 1], self.xycoords[:, 0]))

        return zdepsmth
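
The method above follows a common pattern: rasterize scattered values onto a regular grid, smooth there, then sample back at the original coordinates. A minimal round-trip sketch with toy data (names are illustrative):

import numpy as np
from scipy.ndimage import gaussian_filter
from scipy.interpolate import RegularGridInterpolator

xg = np.arange(0.0, 10.0, 0.5)
yg = np.arange(0.0, 10.0, 0.5)
grid_vals = np.random.rand(len(yg), len(xg))  # values on the (y, x) grid

smoothed = gaussian_filter(grid_vals, sigma=2)

rgi = RegularGridInterpolator((yg, xg), smoothed)
coords = np.random.uniform(0.0, 9.5, (100, 2))  # scattered (y, x) points
print(rgi(coords).shape)  # smoothed values back at the scattered points
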
Example #10
print(('Reading...' + args.filefrag1))
mycube = load_cube.cube()

if not (os.path.isfile(args.filefrag1)):
    print("File ", args.filefrag1, " does not exist ")
    exit(1)

mycube.readfile(args.filefrag1)
x, y, z = np.array(mycube.get_grid_xyz(), dtype=object)

data = mycube.get_data()

print(('Interpolating...' + args.filefrag1))
my_interpolating_function1 = RegularGridInterpolator((x, y, z),
                                                     data,
                                                     method='linear')

print(('Reading...' + args.filefrag2))
mycube = load_cube.cube()

if not (os.path.isfile(args.filefrag2)):
    print("File ", args.filefrag2, " does not exist ")
    exit(1)

mycube.readfile(args.filefrag2)
x2, y2, z2 = np.array(mycube.get_grid_xyz(), dtype=object)
data = mycube.get_data()

print(('Interpolating...' + args.filefrag2))
my_interpolating_function2 = RegularGridInterpolator((x2, y2, z2),
                                                     data,
                                                     method='linear')  # mirrors the construction of my_interpolating_function1
Example #11
def pykmod(teff, logg, microt, metal, outfile):
    path = os.path.abspath(os.path.dirname(__file__))
    kpath = path + '/modelatmospheres/'
    availteff = np.append(
        np.append(np.asarray([3500 + (250 * i) for i in range(35)]),
                  np.asarray([12500 + (500 * i) for i in range(16)])),
        np.asarray([21000 + (1000 * i) for i in range(10)]))
    availlogg = np.asarray([0.5 * i for i in range(11)])
    availmetal = np.asarray([
        -5, -4.5, -4, -3.5, -3, -2.8, -2.5, -2.3, -2.0, -1.8, -1.5, -1.3, -1.0,
        -0.8, -0.5, -0.3, 0, 0.3, 0.5, 0.8, 1, 1.5
    ])
    atmotype = 'odfnew'
    ntau = 72

    v1 = np.where(np.abs(availteff - teff) <= 0.1)
    v2 = np.where(np.abs(availlogg - logg) <= 0.001)
    v3 = np.where(np.abs(availmetal - metal) <= 0.001)

    if (min(availteff) <= teff <= max(availteff)) and (
            min(availlogg) <= logg <=
            max(availlogg)) and (min(availmetal) <= metal <= max(availmetal)):

        #If no interpolation needed, get correct model
        if len(v1[0]) > 0 and len(v2[0]) > 0 and len(v3[0]) > 0:
            model = rd_kmod(availteff[v1[0]][0], availlogg[v2[0]][0],
                            availmetal[v3[0]][0])[0]

        else:
            tm1 = max(availteff[np.where(availteff <= teff)])
            lm1 = max(availlogg[np.where(availlogg <= logg)])
            mm1 = max(availmetal[np.where(availmetal <= metal)])
            tp1 = min(availteff[np.where(availteff >= teff)])
            lp1 = min(availlogg[np.where(availlogg >= logg)])
            mp1 = min(availmetal[np.where(availmetal >= metal)])

            ncols = 10
            grid = np.zeros((2, 2, 2, ncols))

            if tp1 != tm1:
                mapteff = (teff - tm1) / (tp1 - tm1)
            else:
                mapteff = 0.5
            if lp1 != lm1:
                maplogg = (logg - lm1) / (lp1 - lm1)
            else:
                maplogg = 0.5
            if mp1 != mm1:
                mapmetal = (metal - mm1) / (mp1 - mm1)
            else:
                mapmetal = 0.5

            for i in range(1, 9):
                if i == 1: model = rd_kmod(tm1, lm1, mm1)[0]
                if i == 2: model = rd_kmod(tm1, lm1, mp1)[0]
                if i == 3: model = rd_kmod(tm1, lp1, mm1)[0]
                if i == 4: model = rd_kmod(tm1, lp1, mp1)[0]
                if i == 5: model = rd_kmod(tp1, lm1, mm1)[0]
                if i == 6: model = rd_kmod(tp1, lm1, mp1)[0]
                if i == 7: model = rd_kmod(tp1, lp1, mm1)[0]
                if i == 8: model = rd_kmod(tp1, lp1, mp1)[0]

                rhox = np.array([model[i][0] for i in range(ntau)])
                kappaross = np.array([model[i][4] for i in range(ntau)])
                tauross = np.zeros(ntau)
                tauross[0] = rhox[0] * kappaross[0]
                for ii in range(1, ntau):
                    tauross[ii] = simpson(kappaross[0:ii], x=rhox[0:ii])  # pass x as keyword for newer scipy

                if i == 1:
                    model1 = model
                    tauross1 = tauross
                elif i == 2:
                    model2 = model
                    tauross2 = tauross
                elif i == 3:
                    model3 = model
                    tauross3 = tauross
                elif i == 4:
                    model4 = model
                    tauross4 = tauross
                elif i == 5:
                    model5 = model
                    tauross5 = tauross
                elif i == 6:
                    model6 = model
                    tauross6 = tauross
                elif i == 7:
                    model7 = model
                    tauross7 = tauross
                elif i == 8:
                    model8 = model
                    tauross8 = tauross

            model = np.zeros((ntau, ncols))

            tauross = tauross1
            bot_tauross = min([
                tauross1[-1], tauross2[-1], tauross3[-1], tauross4[-1],
                tauross5[-1], tauross6[-1], tauross7[-1], tauross8[-1]
            ])
            top_tauross = max([
                tauross1[0], tauross2[0], tauross3[0], tauross4[0],
                tauross5[0], tauross6[0], tauross7[0], tauross8[0]
            ])

            # NOTE: tauross_new from np.interp below is immediately overwritten
            # by the cubic interp1d fit that follows
            tauross_new = np.interp(
                np.array(range(ntau)),
                np.array(range(ntau))[np.where((tauross >= top_tauross)
                                               & (tauross <= bot_tauross))],
                tauross[np.where((tauross >= top_tauross)
                                 & (tauross <= bot_tauross))])
            myinterp = interp1d(np.array(
                range(ntau))[np.where((tauross >= top_tauross)
                                      & (tauross <= bot_tauross))],
                                tauross[np.where((tauross >= top_tauross)
                                                 & (tauross <= bot_tauross))],
                                fill_value='extrapolate',
                                kind='cubic')
            tauross_new = myinterp(range(ntau))

            for i in range(ntau):
                for j in range(ncols):

                    interpmin = max(0, i - 1000)
                    interpmax = min(i + 1000, ntau - 1)

                    myinterp = interp1d(tauross1[interpmin:interpmax],
                                        model1[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[0, 0, 0, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross2[interpmin:interpmax],
                                        model2[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[0, 0, 1, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross3[interpmin:interpmax],
                                        model3[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[0, 1, 0, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross4[interpmin:interpmax],
                                        model4[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[0, 1, 1, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross5[interpmin:interpmax],
                                        model5[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[1, 0, 0, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross6[interpmin:interpmax],
                                        model6[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[1, 0, 1, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross7[interpmin:interpmax],
                                        model7[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[1, 1, 0, j] = myinterp(tauross_new[i])
                    myinterp = interp1d(tauross8[interpmin:interpmax],
                                        model8[interpmin:interpmax, j],
                                        fill_value='extrapolate',
                                        kind='linear')
                    grid[1, 1, 1, j] = myinterp(tauross_new[i])

                    myinterp2 = RegularGridInterpolator(
                        [[0, 1], [0, 1], [0, 1]], grid[:, :, :, j])
                    model[i, j] = myinterp2([mapteff, maplogg, mapmetal])[0]

        with open(outfile, 'w') as f:

            #Write the header
            teffstring = "{:7.0f}".format(teff)
            loggstring = "{:8.5f}".format(logg)
            f.write('KURUCZ\n')
            f.write('TEFF' + teffstring + '.  GRAVITY' + loggstring +
                    ' LTE \n')
            f.write('NTAU          72\n')

            # Write the body
            for i in range(ntau):
                f.write("{:15.8e}".format(model[i][0]).replace('e', 'E'))
                f.write("{:9.1f}".format(model[i][1]))
                f.write("{:10.3e}".format(model[i][2]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][3]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][4]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][5]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][6]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][7]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][8]).replace('e', 'E'))
                f.write("{:10.3e}".format(model[i][9]).replace('e', 'E'))
                f.write('\n')

            #Write the footer
            f.write(str(round(microt, 2)) + '\n')
            f.write('NATOMS         0	' + str(round(metal, 1)) + '\n')
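
The innermost loop above reduces the 8-corner model blend to trilinear interpolation on the unit cube: the corner values fill a 2x2x2 grid and RegularGridInterpolator is evaluated at the fractional (teff, logg, metal) position. A standalone sketch of that trick:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

corners = np.arange(8.0).reshape(2, 2, 2)  # stand-ins for the 8 corner models
blend = RegularGridInterpolator(([0, 1], [0, 1], [0, 1]), corners)

# fractional coordinates play the role of mapteff/maplogg/mapmetal
print(blend([0.5, 0.5, 0.5])[0])  # 3.5, the mean of all eight corners
print(blend([0.0, 0.0, 1.0])[0])  # 1.0, the (0, 0, 1) corner value
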
def main():
    """
    Initialize environments
    """
    m = 1
    mass_factor = 2.1
    l = 1
    length_factor = 1.1
    b = 0.15
    g = 9.81
    dt = 0.005
    goal = np.array([[np.pi], [0]])
    x_limits = np.array([0, 6.2832])
    numPointsx = 51
    dx = (x_limits[-1] - x_limits[0]) / (numPointsx - 1)
    x_dot_limits = np.array([-6.5, 6.5])
    numPointsx_dot = 81
    dx_dot = (x_dot_limits[-1] - x_dot_limits[0]) / (numPointsx_dot - 1)
    Q = np.array([[40, 0], [0, 0.02]])
    R = 0.2
    test_policies = False
    environment = pendulum(m, l, b, g, dt, goal, x_limits, dx, x_dot_limits,
                           dx_dot, Q, R)
    environment_target = pendulum(m * mass_factor, l * length_factor, b, g, dt,
                                  goal, x_limits, dx, x_dot_limits, dx_dot, Q,
                                  R)
    """
    Learn an initial policy and value function
    """
    gamma = 0.99
    x_grid = np.linspace(x_limits[0], x_limits[1], numPointsx)
    x_dot_grid = np.linspace(x_dot_limits[0], x_dot_limits[1], numPointsx_dot)
    u_limits = np.array([-15, 15])
    numPointsu = 121
    u_grid = np.linspace(u_limits[0], u_limits[1], numPointsu)
    num_iterations = 600

    code_dir = os.path.dirname(os.path.realpath(__file__))
    data_dir = '../data/GPTD'
    data_dir = os.path.join(code_dir, data_dir)

    print('Value Iteration for target domain')
    target_file = 'data_m_%.2f_l_%.2f.pkl' % (m * mass_factor,
                                              l * length_factor)
    fileFound = False
    for root, dirs, files in os.walk(data_dir):
        for file in files:
            if (file.endswith('.pkl') and file == target_file):
                fileFound = True
                print('Relevant pre-computed data found!')
                data = pkl.load(open(os.path.join(data_dir, target_file),
                                     'rb'))
                policy_target = data[0]
                V_target = data[1]
    if (not fileFound):
        policy_target, V_target = ValueIterationSwingUp(
            environment_target, gamma, x_grid, x_dot_grid, u_grid,
            num_iterations)
        pkl.dump((policy_target, V_target),
                 open(os.path.join(data_dir, target_file), 'wb'))

    print('Value Iteration in simulation')
    start_file = 'data_m_%.2f_l_%.2f.pkl' % (m, l)
    fileFound = False
    for root, dirs, files in os.walk(data_dir):
        for file in files:
            if (file.endswith('.pkl') and file == start_file):
                fileFound = True
                print('Relevant pre-computed data found!')
                data = pkl.load(open(os.path.join(data_dir, start_file), 'rb'))
                policy_start = data[0]
                V_start = data[1]
    if (not fileFound):
        policy_start, V_start = ValueIterationSwingUp(environment, gamma,
                                                      x_grid, x_dot_grid,
                                                      u_grid, num_iterations)
        pkl.dump((policy_start, V_start),
                 open(os.path.join(data_dir, start_file), 'wb'))

    V_target = np.reshape(V_target, (numPointsx, numPointsx_dot))
    V_start = np.reshape(V_start, (numPointsx, numPointsx_dot))
    policy_target = np.reshape(policy_target, (numPointsx, numPointsx_dot))
    policy_start = np.reshape(policy_start, (numPointsx, numPointsx_dot))
    """
    Test learned policies
    """
    if (test_policies):

        policy_start_ = RegularGridInterpolator((x_grid, x_dot_grid),
                                                policy_start)
        dyn_start = lambda t, s: environment_target.dynamics_continuous(
            s, policy_start_)
        int_start = ode(dyn_start).set_integrator('vode',
                                                  method='bdf',
                                                  with_jacobian=False)
        int_start.set_initial_value(np.array([[0], [0]]), 0)
        t_final = 10
        trajectory_start = np.empty((2, int(t_final / dt)))
        num_steps = 0
        while int_start.successful() and int_start.t < t_final:
            int_start.integrate(int_start.t + dt)
            trajectory_start[:, num_steps] = int_start.y[:, 0]
            num_steps += 1

        trajectory_start = trajectory_start[:, 0:num_steps]
        plt.plot(trajectory_start[0, :], trajectory_start[1, :])
        plt.scatter(np.pi, 0, c='red', marker='o')
        plt.xlabel('theta')
        plt.ylabel('theta-dot')
        plt.title('Bootstrapped Policy')
        plt.show()

        policy_target_ = RegularGridInterpolator((x_grid, x_dot_grid),
                                                 policy_target)
        dyn_target = lambda t, s: environment_target.dynamics_continuous(
            s, policy_target_)
        int_target = ode(dyn_target).set_integrator('vode',
                                                    method='bdf',
                                                    with_jacobian=False)
        int_target.set_initial_value(np.array([[0], [0]]), 0)
        trajectory_target = np.empty((2, int(t_final / dt)))
        num_steps = 0
        while int_target.successful() and int_target.t < t_final:
            int_target.integrate(int_target.t + dt)
            trajectory_target[:, num_steps] = int_target.y[:, 0]
            num_steps += 1

        trajectory_target = trajectory_target[:, 0:num_steps]
        plt.plot(trajectory_target[0, :], trajectory_target[1, :])
        plt.scatter(np.pi, 0, c='red', marker='o')
        plt.xlabel('theta')
        plt.ylabel('theta-dot')
        plt.title('Target Policy')
        plt.show()
    """
    GPTD
    """
    sigma0 = 0.2
    sigmaf = 7.6156
    sigmal = np.array([[0.6345], [1.2656]], dtype=np.float64)
    kernel = SqExpArd(sigmal, sigmaf)
    policy_target_ = RegularGridInterpolator((x_grid, x_dot_grid),
                                             policy_target)
    policy_prior = lambda s: policy_target_(s.T)[:, np.newaxis]
    V_mu = RegularGridInterpolator((x_grid, x_dot_grid), V_start)
    V_mu_ = lambda s: V_mu(s.T)[:, np.newaxis]

    nu = (np.exp(-1) - 0.3)
    max_episode_length = 1000
    num_episodes = 500
    states = np.mgrid[x_grid[0]:(x_grid[-1] + dx):dx,
                      x_dot_grid[0]:(x_dot_grid[-1] + dx_dot):dx_dot]
    states = np.concatenate((np.reshape(states[0,:,:], (1,states.shape[1]*states.shape[2])),\
                    np.reshape(states[1,:,:], (1,states.shape[1]*states.shape[2]))), axis=0)

    gptd = GPTD_rewardBased(environment_target, nu, sigma0, gamma, kernel,
                            V_mu_)
    print('GPTD.. ')
    gptd.build_posterior(policy_prior, num_episodes, max_episode_length)
    V_gptd = gptd.get_value_function(states)
    V_gptd = np.reshape(V_gptd, (numPointsx, numPointsx_dot))
    print('Initial mean error:%f' % np.mean(np.abs(V_target - V_start)))
    print('Final mean error:%f' % np.mean(np.abs(V_target - V_gptd)))
    set_trace()  # debugging breakpoint
    """
    Results
    """
    plt.subplot(3, 1, 1)
    plt.imshow(np.abs(V_target - V_start).T, aspect='auto',\
        extent=(x_limits[0], x_limits[1], x_dot_limits[1], x_dot_limits[0]), origin='upper')
    plt.ylabel('theta-dot')
    plt.xlabel('theta')
    plt.title('Initial Diff')
    plt.colorbar()

    plt.subplot(3, 1, 2)
    plt.imshow(np.abs(V_target - V_gptd).T, aspect='auto',\
        extent=(x_limits[0], x_limits[1], x_dot_limits[1], x_dot_limits[0]), origin='upper')
    plt.ylabel('theta-dot')
    plt.xlabel('theta')
    plt.title('Final Diff')
    plt.colorbar()

    plt.subplot(3, 1, 3)
    plt.scatter(gptd.D[0, :], gptd.D[1, :], marker='o', c='red')
    plt.xlim(x_limits[0], x_limits[1])
    plt.xlabel('theta')
    plt.ylim(x_dot_limits[0], x_dot_limits[1])
    plt.ylabel('theta-dot')
    plt.title('Dictionary Points')

    resultDirName = 'GPTD_rewardBased_run'
    run = -1
    for root, dirs, files in os.walk(data_dir):
        for d in dirs:
            if (d.startswith(resultDirName)):
                extension = d.split(resultDirName)[-1]
                if (extension.isdigit() and int(extension) >= run):
                    run = int(extension)
    run += 1
    saveDirectory = os.path.join(data_dir, resultDirName + str(run))
    os.mkdir(saveDirectory)
    dl.dump_session(filename=os.path.join(saveDirectory, 'session_%d' %
                                          num_episodes))
    plt.savefig(os.path.join(saveDirectory, 'V_Diff.png'))
    plt.show()
Example #13
def calc_gamma(coords_reference,
               dose_reference,
               coords_evaluation,
               dose_evaluation,
               distance_threshold,
               dose_threshold,
               lower_dose_cutoff=0,
               distance_step_size=None,
               maximum_test_distance=np.inf,
               max_concurrent_calc_points=np.inf,
               num_threads=1):
    """[DEPRECATED] Use `pymedphys.gamma` instead. See
    https://pymedphys.com/en/latest/user/gamma.html

    Compare two dose grids with the gamma index.

    Args:
        coords_reference (tuple): The reference coordinates.
        dose_reference (np.array): The reference dose grid.
        coords_evaluation (tuple): The evaluation coordinates.
        dose_evaluation (np.array): The evaluation dose grid.
        distance_threshold (float): The gamma distance threshold. Units must
            match those of the coordinates given.
        dose_threshold (float): An absolute dose threshold.
            To use 3% of the maximum reference dose, pass
            np.max(dose_reference) * 0.03 here.
        lower_dose_cutoff (:obj:`float`, optional): The lower dose cutoff below
            which gamma will not be calculated.
        distance_step_size (:obj:`float`, optional): The step size to use
            within the reference grid interpolation. Defaults to a tenth of the
            distance threshold as recommended within
            <http://dx.doi.org/10.1118/1.2721657>.
        maximum_test_distance (:obj:`float`, optional): The distance beyond
            which searching will stop. Defaults to np.inf. To speed up
            calculation it is recommended that this parameter is set to
            something reasonable such as 2*distance_threshold.

    Returns:
        gamma (np.array): The array of gamma values the same shape as that
            given by the evaluation coordinates and dose.
    """
    warnings.warn(WARNING_STRING, UserWarning)

    coords_reference, coords_evaluation = _run_input_checks(
        coords_reference, dose_reference, coords_evaluation, dose_evaluation)

    if distance_step_size is None:
        distance_step_size = distance_threshold / 10

    reference_interpolation = RegularGridInterpolator(coords_reference,
                                                      np.array(dose_reference),
                                                      bounds_error=False,
                                                      fill_value=np.inf)

    dose_evaluation = np.array(dose_evaluation)
    dose_evaluation_flat = np.ravel(dose_evaluation)

    mesh_coords_evaluation = np.meshgrid(*coords_evaluation, indexing='ij')
    coords_evaluation_flat = [
        np.ravel(item) for item in mesh_coords_evaluation
    ]

    evaluation_index = np.arange(len(dose_evaluation_flat))
    np.random.shuffle(evaluation_index)
    thread_indices = np.array_split(evaluation_index, num_threads)

    output = Queue()

    kwargs = {
        "coords_reference": coords_reference,
        "num_dimensions": len(coords_evaluation),
        "reference_interpolation": reference_interpolation,
        "lower_dose_cutoff": lower_dose_cutoff,
        "distance_threshold": distance_threshold,
        "dose_threshold": dose_threshold,
        "distance_step_size": distance_step_size,
        "max_concurrent_calc_points": max_concurrent_calc_points / num_threads,
        "maximum_test_distance": maximum_test_distance
    }

    for thread_index in thread_indices:
        thread_index.sort()
        thread_dose_evaluation = dose_evaluation_flat[thread_index]
        thread_coords_evaluation = [
            coords[thread_index] for coords in coords_evaluation_flat
        ]
        kwargs['dose_evaluation'] = thread_dose_evaluation
        kwargs['mesh_coords_evaluation'] = thread_coords_evaluation

        Process(target=_new_thread,
                args=(kwargs, output, thread_index,
                      np.nan * np.ones_like(dose_evaluation_flat))).start()

    gamma_flat = np.nan * np.ones_like(dose_evaluation_flat)

    for i in range(num_threads):
        result = output.get()
        thread_reference = np.invert(np.isnan(result))
        gamma_flat[thread_reference] = result[thread_reference]

    assert np.all(np.invert(np.isnan(gamma_flat)))

    gamma_flat[np.isinf(gamma_flat)] = np.nan
    gamma = np.reshape(gamma_flat, np.shape(dose_evaluation))

    return gamma
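
# A minimal usage sketch for the deprecated function above (synthetic 2-D
# grids, hypothetical 3 mm / 3% criteria; new code should call
# pymedphys.gamma instead):
if __name__ == '__main__':
    import numpy as np

    axis = np.arange(0, 50, 1.0)                  # 1 mm spacing
    yy, xx = np.meshgrid(axis, axis, indexing='ij')
    dose_ref = np.exp(-((xx - 25)**2 + (yy - 25)**2) / 200)
    dose_eval = 1.02 * dose_ref                   # 2% global scaling error

    gamma = calc_gamma((axis, axis), dose_ref,
                       (axis, axis), dose_eval,
                       distance_threshold=3.0,
                       dose_threshold=0.03 * np.max(dose_ref),
                       lower_dose_cutoff=0.2 * np.max(dose_ref),
                       maximum_test_distance=6.0)
    pass_rate = np.mean(gamma[~np.isnan(gamma)] <= 1)
    print('gamma pass rate: %.3f' % pass_rate)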
Example #14
 def test_fill_value(self, method):
     interp = RegularGridInterpolator([np.arange(6)],
                                      np.ones(6),
                                      method=method,
                                      bounds_error=False)
     assert np.isnan(interp([10]))
Example #15
    def buildStrata(self, elev, cumdiff, sea, boundsPt, write=0, outstep=0):
        """
        Build the stratigraphic layer on the regular grid.

        Args:
            elev: numpy float-type array containing the elevation of the nodes in the TIN
            cumdiff: numpy float-type array containing the cumulative erosion/deposition of the nodes in the TIN
            sea: sea level elevation
            boundsPt: number of nodes on the edges of the TIN surface.
            write: flag for output generation
            outstep: step for output generation

        Returns:
            - sub_poro - numpy array containing the subsidence induced by porosity change.
        """

        selev = numpy.zeros(len(self.xyi))
        distances, indices = self.tree.query(self.xyi, k=self.searchpts)
        with numpy.errstate(divide="ignore"):
            weights = 1.0 / distances ** 2
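        # zero distances give inf weights; those nodes are overwritten
        # below with the exact values of their coincident TIN node (onIDs)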

        if self.oldload is not None:
            load_diff = cumdiff - self.oldload
        else:
            load_diff = cumdiff
        if len(elev[indices].shape) == 3:
            elev_vals = elev[indices][:, :, 0]
            cum_vals = load_diff[indices][:, :, 0]
        else:
            elev_vals = elev[indices]
            cum_vals = load_diff[indices]

        felev = numpy.average(elev_vals, weights=weights, axis=1)
        fcum = numpy.average(cum_vals, weights=weights, axis=1)
        onIDs = numpy.where(distances[:, 0] == 0)[0]
        if len(onIDs) > 0:
            felev[onIDs] = elev[indices[onIDs, 0]]
            fcum[onIDs] = load_diff[indices[onIDs, 0]]
        self.oldload = numpy.copy(cumdiff)
        selev = felev

        # Update stratal elevation
        self.stratElev[self.ids, self.step] = selev[self.ids] - sea

        localCum = fcum[self.ids]
        # Update stratal erosion
        eroIDs = numpy.where(localCum < 0.0)[0]
        self.eroLayer(self.ids[eroIDs], localCum)

        # Update stratal deposition
        depIDs = numpy.where(localCum > 0.0)[0]
        subs = self.depoLayer(self.ids[depIDs], localCum)
        subsi = numpy.reshape(subs, (len(self.ygrid), len(self.xgrid)))
        subs_values = RegularGridInterpolator((self.ygrid, self.xgrid), subsi)
        sub_poro = numpy.zeros(len(self.xyTIN[:, 0]))
        sub_poro[boundsPt:] = subs_values(
            (self.xyTIN[boundsPt:, 1], self.xyTIN[boundsPt:, 0])
        )
        sub_poro[sub_poro > 0.0] = 0.0

        self.oldload += sub_poro
        if write > 0:
            self.layerMesh(selev[self.ids] + subs[self.ids])
            self.write_hdf5_stratal(outstep - 1)

        self.step += 1

        return sub_poro
Example #16
# Evaluate a simple example function on the points of a 3D grid:

import numpy as np
from scipy.interpolate import RegularGridInterpolator
def f(x, y, z):
    return 2 * x**3 + 3 * y**2 - z
x = np.linspace(1, 4, 11)
y = np.linspace(4, 7, 22)
z = np.linspace(7, 9, 33)
data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))

# ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
# Next, define an interpolating function from this data:

my_interpolating_function = RegularGridInterpolator((x, y, z), data)

# Evaluate the interpolating function at the two points
# ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:

pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
my_interpolating_function(pts)
# array([ 125.80469388,  146.30069388])

# which is indeed a close approximation to
# ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.

# With the spline interpolation methods it is possible to compute smooth
# gradients for a variety of purposes, such as numerical optimization.

# To demonstrate this, let's define a function with known gradients for
# demonstration, and create grid sample axes with a variety of sizes:
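
# A minimal sketch of that idea (not the upstream continuation): build a
# cubic interpolant on the same data (requires scipy >= 1.9) and estimate
# smooth gradients by central differences of the interpolant:

interp_cubic = RegularGridInterpolator((x, y, z), data, method='cubic')

def interp_grad(pt, h=1e-5):
    pt = np.asarray(pt, dtype=float)
    g = np.empty(pt.size)
    for axis in range(pt.size):
        step = np.zeros(pt.size)
        step[axis] = h
        g[axis] = (interp_cubic(pt + step) - interp_cubic(pt - step))[0] / (2 * h)
    return g

interp_grad([2.1, 6.2, 8.3])
# close to the analytic gradient [6*2.1**2, 6*6.2, -1.0]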
Example #17
    def __init__(self, filename=None,
                 degree=1,
                 try_local_first=True,
                 approximation_mixing_threshold=0.5,
                 _limit=np.inf,
                 *args, **kwargs):
        """
        Parameters
        ----------
        filename : str
        degree
        _limit
        ground_truth : bool
        """
        self._approximation_mixing_threshold = approximation_mixing_threshold
        with np.load(self.solution_path(filename,
                                        try_local_first)) as fh:
            self.load_specific_attributes(fh)
            self.a = fh['A_{}'.format(degree)]

            COMPRESSED = fh[self.solution_array_name(degree, *args, **kwargs)]
            sampling_frequency = fh['sampling_frequency']

            N = min(_limit, fh['N'])

            stride = 2 * N - 1
            linear_stride = (N - 1) * sampling_frequency + 1

            self.POTENTIAL = self.empty(stride ** 3)
            self.X = self.empty(stride ** 3)
            self.Y = self.empty(stride ** 3)
            self.Z = self.empty(stride ** 3)

            POTENTIAL = self.empty((linear_stride,
                                    linear_stride,
                                    linear_stride))

            # WARNING: subsampling without filtering
            for x in range(0, N * sampling_frequency, sampling_frequency):
                for y in range(0, x + 1, sampling_frequency):
                    for z in range(0, y + 1, sampling_frequency):
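                        # COMPRESSED holds one value per ordered triple
                        # x >= y >= z; the tetrahedral + triangular index
                        # below locates that triple in the flat array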
                        val = COMPRESSED[x * (x + 1) * (x + 2) // 6
                                         + y * (y + 1) // 2
                                         + z]
                        # NOTE: if x == y, x == z or y == z
                        # itertools.permutations() may repeat xs, ys, zs
                        for xs, ys, zs in itertools.permutations(
                                [self._abs_inv_list(x // sampling_frequency),
                                 self._abs_inv_list(y // sampling_frequency),
                                 self._abs_inv_list(z // sampling_frequency),
                                 ]):
                            for i, j, k in itertools.product(xs, ys, zs):
                                idx = (((N - 1 + i)
                                        * stride + N - 1 + j)
                                       * stride) + N - 1 + k

                                self.POTENTIAL[idx] = val
                                self.X[idx] = i
                                self.Y[idx] = j
                                self.Z[idx] = k

            for x in range(0, linear_stride):
                for y in range(0, x + 1):
                    for z in range(0, y + 1):
                        val = COMPRESSED[x * (x + 1) * (x + 2) // 6
                                         + y * (y + 1) // 2
                                         + z]
                        for i, j, k in itertools.permutations([x, y, z]):
                            POTENTIAL[i, j, k] = val

        self._radius = (linear_stride - 1.0) / sampling_frequency
        LINSPACE = np.linspace(0, self._radius, linear_stride)
        self._interpolator = RegularGridInterpolator((LINSPACE,
                                                      LINSPACE,
                                                      LINSPACE),
                                                     POTENTIAL,
                                                     bounds_error=False)
Example #18
def T2D(nx, ny):
    # np, ndimage, and the module-level nx, ny, lx, ly, x, y are assumed
    # to be imported/defined earlier in the original source
    #    tx = 1300*np.exp(-x/300)
    #
    #    return np.tile(tx,(ny,1))

    unsm = 293.15 + 1200 * np.random.rand(nx, ny)

    sig = 1
    return ndimage.gaussian_filter(unsm, [sig, sig], mode='constant')


t2d = T2D(nx, ny)

nn = 10
xn = np.arange(0, lx, nn)
yn = np.arange(0, ly, nn)
itp = RegularGridInterpolator((y, x), t2d, method='nearest')

grid = np.ix_(yn, xn)
t2d_int = itp(grid)


def N2D(r):

    if len(r) > 2:

        return 1 + (0.000293 * 293.15) / T2D(nx, ny)

    else:

        global x, y
Example #19
 def eval_cell(self, values, x, cell):
     interp_handle = RegularGridInterpolator((self.X, self.Y), self.image)
     values[0] = interp_handle(x)
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('Lift-to-drag ratio')
# plt.show()

range_data = {}
plt.figure()
for concept in concepts:
    data = np.loadtxt('./' + state + '_' + concept + '.txt')
    aoa = np.unique(data[:, 0])
    velocity = np.unique(data[:, 1])
    cl = data[:, 2].reshape([200, 200])
    LD_ratio = data[:, 3].reshape([200, 200])

    f_LD = RegularGridInterpolator((velocity, aoa),
                                   LD_ratio,
                                   fill_value=0,
                                   bounds_error=False)
    f_L = RegularGridInterpolator((velocity, aoa),
                                  cl,
                                  fill_value=0,
                                  bounds_error=False)

    # velocity = np.linspace(20, 65, 7)
    # plt.figure()
    # aoas = np.linspace(0,12,1000)
    # for i in range(len(velocity)):
    # data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
    # plt.plot(aoas, f_L(data_i), label = velocity[i])
    # # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
    # plt.legend()
    # plt.show()
Example #21
    def Reconstruction(self, savefile):
        ''' TO DOs: zero pad filter and projection data'''
        R = self.params['SourceToAxis']
        D = self.params['SourceToDetector'] - R
        nx = int(self.params['DetectorWidth'])
        ny = int(self.params['DetectorHeight'])
        ns = int(self.params['NumberOfViews'])
        DetectorPixelWidth = self.params['DetectorPixelWidth']
        DetectorPixelHeight = self.params['DetectorPixelHeight']
        recon = np.zeros([
            self.params['ReconX'], self.params['ReconY'], self.params['ReconZ']
        ])
        DetectorSize = [nx * DetectorPixelWidth, ny * DetectorPixelHeight]
        ZeroPaddedLength = int(2**(ceil(log2(2 * (nx - 1)))))
        fov = 2.0 * R * sin(atan(DetectorSize[0] / 2.0 / (D + R)))
        fovz = 2.0 * R * sin(atan(DetectorSize[1] / 2.0 / (D + R)))
        self.params['fov'] = fov
        self.params['fovz'] = fovz
        x = np.linspace(-fov / 2.0, fov / 2.0, self.params['ReconX'])
        y = np.linspace(-fov / 2.0, fov / 2.0, self.params['ReconY'])
        z = np.linspace(-fovz / 2.0, fovz / 2.0, self.params['ReconZ'])
        [xx, yy] = np.meshgrid(x, y)
        ReconZ = self.params['ReconZ']
        ProjectionAngle = np.linspace(0, self.params['AngleCoverage'], ns + 1)
        ProjectionAngle = ProjectionAngle[0:-1]
        dtheta = ProjectionAngle[1] - ProjectionAngle[0]
        assert len(ProjectionAngle) == ns
        print('Reconstruction starts')
        # ki = np.arange(0 - (nx - 1) / 2, nx - (nx - 1) / 2)
        # p = np.arange(0 - (ny - 1) / 2, ny - (ny - 1) / 2)
        ki = np.arange(0, nx) - (nx - 1) / 2.0
        p = np.arange(0, ny) - (ny - 1) / 2.0
        ki = ki * DetectorPixelWidth
        p = p * DetectorPixelHeight
        cutoff = 0.3
        FilterType = 'hamming'
        recon_filter = ConeBeam.Filter(ZeroPaddedLength + 1,
                                       DetectorPixelWidth * R / (D + R),
                                       FilterType, cutoff)
        ki = (ki * R) / (R + D)
        p = (p * R) / (R + D)
        [kk, pp] = np.meshgrid(ki, p)
        # 		sample_points = np.vstack((pp.flatten(), kk.flatten())).T
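        # cosine (FDK) pre-weighting: scale each detector sample by
        # R / sqrt(R^2 + k^2 + p^2) before ramp filtering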
        weight = R / (sqrt(R**2 + kk**2 + pp**2))
        for i in range(0, ns):
            angle = ProjectionAngle[i]
            if i == 0:
                print("1st projection")
            elif i == 1:
                print("2nd projection")
            elif i == 2:
                print("3rd projection")
            else:
                print(i, 'th projection')
            WeightedProjection = weight * self.proj[:, :, i]
            Q = np.zeros(WeightedProjection.shape)
            for k in range(ny):
                tmp = real(
                    ifft(
                        ifftshift(recon_filter * fftshift(
                            fft(WeightedProjection[k, :], ZeroPaddedLength)))))
                Q[k, :] = tmp[0:nx]
            InterpolationFunction = RegularGridInterpolator((p, ki),
                                                            Q,
                                                            bounds_error=False,
                                                            fill_value=0)
            t = xx * cos(angle) + yy * sin(angle)
            s = -xx * sin(angle) + yy * cos(angle)
            for l in range(0, ReconZ):
                InterpX = (R * t) / (R - s)
                InterpY = (R * z[l]) / (R - s)
                InterpW = (R**2) / ((R - s)**2)
                pts = np.vstack((InterpY.flatten(), InterpX.flatten())).T
                vq = InterpolationFunction(pts)
                recon[l, :, :] += InterpW * dtheta * \
                                  vq.reshape([self.params['ReconX'], self.params['ReconY']])
        # 				Interpolgpu(drv.Out(dest),drv.In(Q),block=())
        # Interpolation required

        self.recon = recon.astype(np.float32)
        self.recon.tofile(savefile)
Example #22
def barnes(ix, iy, iz, gs=None, nyx=None, limit=None, radius=None,
           gamma=0.3, kappa=None, npasses=3, non_uniform=True,
           yxout=None, first_guess=None, missing=None,
           zrange=None, nonegative=False):
    """
    Implement Barnes objective analysis.
    note: polar regions are not considered; near the poles an approximate
          calculation of the distance along a great circle arc should be
          used.

    references:
    Koch, S., M. desJardins,and P. Kocin, 1983: An Interactive Barnes
      Objective Map Analysis Scheme for Use with Satellite and
      Convectional Data. Journal of Appl. Meteor., 22, 1487-1503.
    Barnes, S.L., 1994a: Applications of the Barnes objective analysis scheme
      Part I: Effects of undersampling, wave position, and station randomness.
      J. Atmos. Oceanic Technol. 11, 1433-1448.
    Barnes, S.L., 1994b: Applications of the Barnes objective analysis scheme
      Part II: Improving derivative estimates. J. Atmos. Oceanic Technol. 11,
      1449-1458.
    Barnes, S.L., 1994c: Applications of the Barnes objective analysis scheme
      Part III: Tuning for minimum error. J. Atmos. Oceanic Technol. 11,
      1459-1479.
    Narkhedkar, S. G., S. K. Sinha and A. K. Mitra (2008): Mesoscale
      objective analysis of daily rainfall with satellite and conventional
      data over Indian summer monsoon region. Geofizika, 25, 159-178.
    http://www.atmos.albany.edu/GEMHELP5.2/OABSFC.html

    :param ix: 1D array, station longitude
    :param iy: 1D array, station latitude
    :param iz: 1D array, station observations.
    :param gs: the result grid spacing, [ys, xs], where xs is the
               horizontal spacing between grid points and ys is
               the vertical spacing. Default is the average
               station spaces.
    :param nyx: the result grid size, [ny, nx], where nx is the output
                grid size in the x direction and ny is in the y
                direction. if not be specified, the size will be
                inferred from gs and bounds. if gs and nxy both are specified,
                nxy will overlap gs.
    :param limit: If present, limit must be a four-element array
                  containing the grid limits in x and y of the output
                  grid: [ymin, ymax, xmin, xmax]. If not specified, the
                  grid limits are set to the extent of x and y.
    :param radius: search radius, [y radius, x radius],
                  [40, 40] is default, with 'kappa' units, where kappa is the
                  scale length, which controls the rate of fall-off of the
                  weighting function. Search radius is the max distance that
                  a station may be from a grid point to be used in the analysis
                  for that point.  The search radius will be set so that
                  stations whose weighting factor would be less than
                  EXP (-SEARCH) will not be used.  SEARCH must be in the range
                  1 - 50, such that stations receiving a weight less than
                  EXP(-search) are considered negligible.  Typically a value
                  of 20 is used, which corresponds to a weight threshold of
                  approximately 2e-9. If a very small value is used, many grid
                  points will not have 3 stations within the search area and
                  will be set to the missing data value.
    :param gamma: a numerical convergence parameter that controls the
                  difference between the weights on the first and second
                  passes, and lies between 0 and 1. Typically a value between
                  0.2 and 0.3 is used. gamma=0.3 is the default.
                  gamma=0.2, minimum smoothing;
                  gamma=1.0, maximum smoothing.
    :param kappa: the scale length, Koch et al., 1983
    :param npasses: 3 passes is default.
                    Set the number of passes for the Barnes analysis to do
                    4 passes recommended for analysing fields where derivative
                    estimates are important (Ref: Barnes 1994b) 3 passes
                    recommended for all other fields (with gain set to 1.0)
                    (Ref: Barnes 1994c "Two pass Barnes Objective Analysis
                    schemes now in use probably should be replaced
                    by appropriately tuned 3 pass or 4 pass schemes") 2 passes
                    only recommended for "quick look" type analyses.
    :param non_uniform: When the data spacing is severely non-uniform,
                        Koch et al. (1983) suggested the data spacing Dn,
                        which has the following form:
                          sqrt(area){(1+sqrt(N))/(N-1)}
    :param yxout: the latitudes and longitudes on the grid where interpolated
                  values are desired (in degrees), list [yout[:], xout[:]]
    :param first_guess: use a model grid as a first guess field for the
                        analysis, which is a dictionary
                        {'data': [ny, nx], 'x': x[nx], 'y': y[ny]}
    :param missing: if set, observations equal to this value are removed.
    :param zrange: if set, z which are in zrange are used.
    :param nonegative: if True, negative number were set to 0.0 for return.
    :return: output grid, which is a dictionary
             {'data': [ny, nx], 'x': x[nx], 'y': y[ny]}
    """

    # keep origin data
    x = ix.copy()
    y = iy.copy()
    z = iz.copy()

    # check z shape
    if (len(x) != len(z)) or (len(y) != len(z)):
        raise Exception('z, x, y dimension mismatch.')

    # remove missing values
    if missing is not None:
        index = z != missing
        z = z[index]
        x = x[index]
        y = y[index]

    # control z value range
    if zrange is not None:
        index = np.logical_and(z >= zrange[0], z <= zrange[1])
        z = z[index]
        x = x[index]
        y = y[index]

    # check observation number
    if len(z) < 3:
        return None

    # domain definitions
    if limit is None:
        limit = [np.min(y), np.max(y), np.min(x), np.max(x)]

    # calculate data spacing
    deltan = stations_avg_distance(x, y, non_uniform=non_uniform)

    # gamma parameters
    if gamma < 0.2:
        gamma = 0.2
    if gamma > 1.0:
        gamma = 1.0

    # kappa parameters (the scale length, Koch et al., 1983)
    if kappa is None:
        kappa = 5.052 * (deltan * 2.0 / np.pi) * (deltan * 2.0 / np.pi)

    # search radius
    if radius is None:
        radius = [40. * kappa, 40. * kappa]

    # define grid size
    #
    # Peterson and Middleton (1963) stated that a wave whose
    # horizontal wavelength does not exceed at least 2*deltan
    # cannot resolved, since five data points are required to
    # describe a wave. Hence deltax(i.e., gs) not be larger than
    # half of deltan. Since a very small grid resolution may
    # produce an unrealistic noisy derivative and if the
    # derivative fields are to represent only resolvable features,
    # the grid length should not be much smaller than deltan.
    # Thus a constraint that deltan/3 <= deltax <= deltan/2 was
    # imposed by Barnes in his interactive scheme.
    if gs is None:
        if nyx is not None:
            gs = [(limit[1] - limit[0])/nyx[0],
                  (limit[3] - limit[2])/nyx[1]]
        else:
            gs = [deltan * 0.4, deltan * 0.4]
            nyx = [int((limit[1] - limit[0])/gs[0]),
                   int((limit[3] - limit[2])/gs[1])]
    else:
        nyx = [int((limit[1] - limit[0])/gs[0]),
               int((limit[3] - limit[2])/gs[1])]

    # result grid x and y coordinates
    if yxout is not None:
        nyx = [len(yxout[0]), len(yxout[1])]
        gs = [yxout[0][1]-yxout[0][0], yxout[1][1]-yxout[1][0]]
    else:
        yxout = [
            scale_vector(
                np.arange(nyx[0], dtype=float), limit[0], limit[1]),
            scale_vector(
                np.arange(nyx[1], dtype=float), limit[2], limit[3])]

    # define grid
    yout = yxout[0]
    ny = yout.size
    xout = yxout[1]
    nx = xout.size
    g0 = np.full((ny, nx), np.nan)

    # first pass
    indices = []
    distances = []
    for j in range(ny):
        for i in range(nx):
            # points in search radius (radius is [y radius, x radius])
            rd = (
                ((xout[i] - x) / radius[1]) ** 2 +
                ((yout[j] - y) / radius[0]) ** 2)
            if np.count_nonzero(rd <= 1.0) < 1:
                indices.append(None)
                distances.append(None)
                continue

            # extract points in search radius
            index = np.nonzero(rd <= 1.0)
            xx = x[index]
            yy = y[index]
            zz = z[index]

            # compute the squared distance; east-west separations shrink
            # with latitude, so the cos(lat) factor (latitudes in degrees,
            # converted to radians) applies to the x-term
            coslat = np.cos(np.radians(yout[j]))
            d = ((xout[i] - xx) * coslat) ** 2 + (yout[j] - yy) ** 2

            # compute weights
            w = np.exp(-1.0 * d / kappa)

            # compute grid value
            g0[j, i] = np.sum(w * zz) / np.sum(w)

            # append index and w for computation efficiency
            indices.append(index)
            distances.append(d)

    # initializing first guess with give field
    if first_guess is not None:
        g0 = hinterp(first_guess['data'], first_guess['x'],
                     first_guess['y'], xout, yout)

    # second and more pass
    points = np.vstack((y, x)).T
    for k in range(npasses-1):
        # initializing corrected grid
        g1 = g0.copy()

        # interpolating to points
        interp_func = RegularGridInterpolator((yout, xout), g0)
        z1 = interp_func(points)

        # pass
        num = 0
        for j in range(ny):
            for i in range(nx):
                if indices[num] is None:
                    num += 1
                    continue

                # compute grid value
                index = indices[num]
                zz = z[index] - z1[index]
                d = distances[num]
                w = np.exp(-d / (gamma * kappa))
                g1[j, i] = g0[j, i] + np.sum(w*zz)/np.sum(w)
                num += 1

        # update g0
        g0 = g1.copy()

    # set negative value to zero
    if nonegative:
        g0[g0 < 0] = 0.0

    # return grid
    return {'data': g0, 'x': xout, 'y': yout}
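
# A minimal usage sketch (synthetic stations; the values below are made up
# and the helper functions used by barnes() are assumed importable):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    lon = rng.uniform(100., 110., 200)
    lat = rng.uniform(25., 35., 200)
    obs = 15. + 0.5 * (lon - 105.) - 0.3 * (lat - 30.) + rng.normal(0., 0.2, 200)

    grid = barnes(lon, lat, obs, nyx=[60, 60], npasses=3)
    if grid is not None:
        # grid['data'] is a (ny, nx) analysis on the grid['y'] x grid['x'] mesh
        print(grid['data'].shape, grid['x'].size, grid['y'].size)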
Example #23
ds_list.sort(key=takeSpaceLocation)
data_volume = np.zeros([rawx, rawy, rawz])
for i in range(rawz):
    data_volume[:, :, i] = ds_list[i].pixel_array
# for aligning with stl surface data and interpolator
data_volume = np.swapaxes(data_volume, 0, 1)
data_volume = np.flip(data_volume, 1)

# Spacing grids
x = np.linspace(0, float(ds_list[0].PixelSpacing[0]) * rawx, rawx, endpoint=False)
y = np.linspace(-float(ds_list[0].PixelSpacing[1]) * (rawy - 1), 0, rawy, endpoint=True)
z = np.linspace(float(ds_list[0].ImagePositionPatient[2]),
                float(ds_list[-1].ImagePositionPatient[2]), rawz, endpoint=True)
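# NOTE: z assumes uniformly spaced slices; it is built only from the first
# and last ImagePositionPatient z-coordinates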

# Interpolation
volume_interp_func = RegularGridInterpolator((x, y, z), data_volume, bounds_error=False, fill_value=0)

# Create source plane
slice_sz = 256
source_pts = np.zeros([4, slice_sz*slice_sz])
for i in range(slice_sz):
    for j in range(slice_sz):
        source_pts[0, i * slice_sz + j] = i - slice_sz / 2
        source_pts[1, i * slice_sz + j] = j - 9
        source_pts[3, i * slice_sz + j] = 1
source_anchor_pts = np.transpose(np.array([[0, 0, 0, 1], [-slice_sz / 2, slice_sz-9, 0, 1], [slice_sz / 2, slice_sz-9, 0, 1]]))

# Read skin surface mesh for placing images and us fan-shape mask
mesh_test = mesh.Mesh.from_file('./STLRead/surface_skin_LPS_simplified.stl')
us_mask = imageio.imread('./us_mask.bmp')
Example #24
    def __call__(self,
                 simspace: SimulationSpace,
                 wlen: float = None,
                 **kwargs) -> fdfd_tools.VecField:
        matpath = os.path.join(simspace._filepath, self._params.file_name)
        overlap = sio.loadmat(matpath)

        # Use reference_grid to get coords which the overlap fields are defined on.
        reference_grid = simspace(wlen).eps_bg
        overlap_grid = np.zeros(reference_grid.grids.shape, dtype=np.complex128)

        xyz = reference_grid.xyz
        dxyz = reference_grid.dxyz
        shifts = reference_grid.shifts

        overlap_comp = ["Ex", "Ey", "Ez"]
        overlap_center = self._params.center

        overlap_coords = [
            overlap["x"][0] + overlap_center[0],
            overlap["y"][0] + overlap_center[1],
            overlap["z"][0] + overlap_center[2]
        ]

        # The interpolation done below only works on three-dimensional grids with each dimension containing
        # more than a single grid point (i.e. no two-dimensional grids). Therefore, if a dimension has a
        # singleton grid point, we duplicate along that axis to create a pseudo-3D grid.
        coord_dims = np.array([
            overlap_coords[0].size, overlap_coords[1].size,
            overlap_coords[2].size
        ])
        singleton_dims = np.where(coord_dims == 1)[0]
        if singleton_dims.size != 0:
            for axis in singleton_dims:
                # The dx from the SPINS simulation grid is borrowed for the replication.
                dx = dxyz[axis][0]
                coord = overlap_coords[axis][0]
                overlap_coords[axis] = np.insert(overlap_coords[axis], 0,
                                                 coord - dx / 2)
                overlap_coords[axis] = np.append(overlap_coords[axis],
                                                 coord + dx / 2)
                # Repeat the overlap fields along the extended axis
                for comp in overlap_comp:
                    overlap[comp] = np.repeat(overlap[comp],
                                              overlap_coords[axis].size, axis)

        for i in range(0, 3):

            # Interpolate the user-specified overlap fields for use on the simulation grids
            overlap_interp_function = RegularGridInterpolator(
                (overlap_coords[0], overlap_coords[1], overlap_coords[2]),
                overlap[overlap_comp[i]],
                bounds_error=False,
                fill_value=0.0)

            # Grid coordinates for each component of Electric field. Shifts due to Yee lattice offsets.
            # See documentation of ``Grid" class for more detailed explanation.
            xs = xyz[0] + dxyz[0] * shifts[i, 0]
            ys = xyz[1] + dxyz[1] * shifts[i, 1]
            zs = xyz[2] + dxyz[2] * shifts[i, 2]

            # Evaluate the interpolated overlap fields on the simulation grids
            eval_coord_grid = np.meshgrid(xs, ys, zs, indexing='ij')
            eval_coord_points = np.reshape(eval_coord_grid, (3, -1),
                                           order='C').T
            interp_overlap = overlap_interp_function(eval_coord_points)
            overlap_grid[i] = np.reshape(interp_overlap,
                                         (len(xs), len(ys), len(zs)),
                                         order='C')

        return overlap_grid
Example #25
def interpolate_2D(x, y, z, profile_len, reso, step):
    f = RegularGridInterpolator((x, y), z)  # alternative: interpolate.interp2d(y, x, z, kind='linear')
    return f
Example #26
def build_large_phantom_dataset(gpu_id=1):
    """Builds dataset with anthropomorphic phantoms too large to fit in memory"""
    import astra

    @utils.timeit
    def create_half_CB_projection(ct_volume,
                                  scanner_params,
                                  proj_vecs,
                                  voxel_size=.1,
                                  **kwargs):
        """
            Args:
            -----
                ct_volume (np.ndarray): [z,x,y] axis order at import
                scanner_params (class): scanner class to get relevant metadata from
                proj_vecs (np.ndarray): vecs of the scan trajectory
                voxel_size (float): voxel size in mm

                --optional--
                gpu_id (int): GPU to run astra on, can be set in globals(), defaults to -1 otherwise

            Returns:
            --------
                projections (np.ndarray): projection data
        """

        astra.astra.set_gpu_index(globals().get('GPU_ID',
                                                kwargs.get('gpu_id', -1)))

        source_height = proj_vecs[0, 2]
        split_line = int(
            np.round(len(ct_volume) / 2 + source_height / voxel_size))
        print(len(ct_volume), split_line)
        z_dims = list(
            map(lambda x: x * len(ct_volume) / 2 * voxel_size,
                [-1, -1 + 2 * split_line / len(ct_volume), 1]))

        full_projections = None  #np.empty((len(proj_vecs), *scanner_params.detector_binned_size))
        print(z_dims)

        for i in range(2):
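            # project the top and bottom half-volumes separately and sum the
            # two projection stacks; line integrals are additive, so the sum
            # matches projecting the full volume at half the memory cost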
            half_volume = ct_volume[:split_line +
                                    1] if not i else ct_volume[split_line:]

            # [y,x,z] axis order for size, [x,y,z] for volume shape, ...why?
            vol_geom = astra.creators.create_vol_geom(
                *np.transpose(half_volume, (1, 2, 0)).shape,
                *[
                    sign * size / 2 * voxel_size
                    for size in half_volume.shape[-2:][::-1]
                    for sign in [-1, 1]
                ],
                *z_dims[i:i + 2],
            )

            # [z,x,y] axis order for volume data
            proj_id = astra.data3d.create('-vol', vol_geom, data=half_volume)
            proj_geom = astra.create_proj_geom(
                'cone_vec', *scanner_params.detector_binned_size, proj_vecs)

            projections_id, projections = astra.creators.create_sino3d_gpu(
                proj_id, proj_geom, vol_geom)

            astra.data3d.delete([proj_id, projections_id])

            if not i:
                full_projections = projections
                # utils.save_vid('outputs/half_projection_top.avi', np.transpose(projections, (1,0,2))[..., None])
            else:
                full_projections += projections
                # utils.save_vid('outputs/half_projection_bottom.avi', np.transpose(projections, (1,0,2))[..., None])
                # sys.exit()

        # from [rows,proj_slc,cols] to [proj_slc,rows,cols]
        return np.transpose(full_projections, (1, 0, 2))

    DATA_PATH = Path('/data/fdelberghe/')
    save_folder = DATA_PATH / 'PhantomsRadial/'

    phantom_folders = natsorted(DATA_PATH.glob('AxialPhantoms/*'))

    scanner_params = FleX_ray_scanner()
    scanner_trajs = [
        astra_sim.create_scan_geometry(scanner_params,
                                       n_projs=1200,
                                       elevation=el) for el in [-12, 0, 12]
    ]

    theta_range = np.linspace(0,
                              np.pi,
                              int(np.round(np.sqrt(2) * 501)),
                              endpoint=False)

    for folder in [phantom_folders[1], phantom_folders[-1]]:
        print(f"Loading {folder}/...")

        axial_ims = sorted(folder.glob('*.tif'), key=utils._nat_sort)
        input_volume = np.stack([imread(file) for file in axial_ims],
                                axis=0).astype('float32')
        # Estimates the air density from mean intensity at the edges of the volume
        dark_field = np.mean([
            input_volume[0].mean(), input_volume[:, 0].mean(),
            input_volume[:, :, 0].mean(), input_volume[:, :, -1].mean()
        ])
        input_volume = (input_volume - dark_field) / (input_volume.max() -
                                                      dark_field)

        interp_shape = (501, 501)
        # # interp the volume in a box the size of the largest axis
        max_in_dim = max(input_volume.shape)

        # Creates grid center on volume center regardless of volume shape
        z_gr = np.linspace(
            -input_volume.shape[0] / interp_shape[0] / max_in_dim * 501,
            input_volume.shape[0] / interp_shape[0] / max_in_dim * 501,
            input_volume.shape[0])
        x_gr, y_gr = [
            np.linspace(
                -input_volume.shape[j] / interp_shape[1] / max_in_dim * 501,
                input_volume.shape[j] / interp_shape[1] / max_in_dim * 501,
                input_volume.shape[j]) for j in range(1, 3)
        ]

        interp = RegularGridInterpolator((z_gr, x_gr, y_gr),
                                         input_volume,
                                         fill_value=0,
                                         bounds_error=False)

        z_gr = np.linspace(-1.0, 1.0, interp_shape[0])
        xy_gr = np.linspace(-1.0, 1.0, interp_shape[1])

        z_rad = np.vstack((z_gr, ) * interp_shape[1]).T

        (save_folder / folder.name).mkdir(parents=True, exist_ok=True)

        for j in range(len(theta_range)):
            x_rad = np.vstack(
                (xy_gr * np.cos(theta_range[j]), ) * interp_shape[0])
            y_rad = np.vstack(
                (xy_gr * -np.sin(theta_range[j]), ) * interp_shape[0])

            rad_slices_input = interp(
                np.vstack((z_rad.flatten(), x_rad.flatten(),
                           y_rad.flatten())).T).reshape(interp_shape)

            print(f"\rSaving {folder.name}/CT_target_s{j+1:0>3d}", end=' ' * 5)
            imsave((save_folder / folder.name / f'CT_target_s{j+1:0>4d}.tif'),
                   rad_slices_input.astype('float32'))
        print('')

        # Voxel size for whole cube volume within scan FoV
        vox_sz = scanner_params.source_origin_dist / (
            scanner_params.source_detector_dist / min(scanner_params.FoV) +
            .5) / max_in_dim

        for i in range(len(scanner_trajs)):
            projections = create_half_CB_projection(input_volume,
                                                    scanner_params,
                                                    proj_vecs=scanner_trajs[i],
                                                    voxel_size=vox_sz,
                                                    gpu_id=gpu_id)
            reconstructed_volume = astra_sim.FDK_reconstruction(
                projections,
                scanner_params,
                proj_vecs=scanner_trajs[i],
                voxel_size=vox_sz * max_in_dim / 501,
                gpu_id=gpu_id)

            rad_slices_CB = radial_slice_sampling(reconstructed_volume,
                                                  theta_range)

            for j in range(len(theta_range)):
                print(f"\rSaving {folder.name}/CB_source_s{j+1:0>3d}",
                      end=' ' * 5)
                imsave((save_folder / folder.name /
                        f'CB_source_orbit{i+1:0>2d}_s{j+1:0>4d}.tif'),
                       rad_slices_CB[j])
            print('')
def Plot_Ewald_Sphere_Correction(D, wavelength_angstroms, ucell=[], cscale=1, lcscale=1, **kwargs):

    """ pass full 3d data,SF,wavelength in angstroms """
    # cscale : factor by which to scale the maximum value of the colorbar
    # lcscale : factor by which to scale the maximum value of the colorbar

    if not os.path.exists(path):
        os.makedirs(path)

    X = D[:, 0, 0, 0]
    Y = D[0, :, 0, 1]
    Z = D[0, 0, :, 2]
    SF = D[:, :, :, 3]

    K_ES = 2.0*math.pi/wavelength_angstroms  # calculate k for incident xrays in inverse angstroms

    ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)

    xypts = []
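    # project each in-plane q-point onto the Ewald sphere: for scattering
    # angle theta, the transverse components scale by cos(theta) and the
    # component along the beam is K_ES*(1 - cos(theta))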
    for ix in range(D.shape[0]):
        xsq = X[ix]**2.0
        for iy in range(D.shape[1]):
            theta = np.arctan(old_div(np.sqrt(xsq + Y[iy]**2.0),K_ES))
            xypts.append((X[ix]*np.cos(theta), Y[iy]*np.cos(theta), K_ES*(1.0 - np.cos(theta))))

    xzpts = []
    for ix in range(D.shape[0]):
        xsq = X[ix]**2.0
        for iz in range(D.shape[2]):
            theta = np.arctan(old_div(np.sqrt(xsq + Z[iz]**2.0),K_ES))
            xzpts.append((X[ix]*np.cos(theta), K_ES*(1.0-np.cos(theta)), Z[iz]*np.cos(theta)))

    yzpts = []
    for iy in range(D.shape[1]):
        ysq = Y[iy]**2.0
        for iz in range(D.shape[2]):
            theta = np.arctan(old_div(np.sqrt(ysq+Z[iz]**2.0),K_ES))
            yzpts.append((K_ES*(1.0-np.cos(theta)), Y[iy]*np.cos(theta), Z[iz]*np.cos(theta)))

    xypts = np.asarray(xypts)
    xzpts = np.asarray(xzpts)
    yzpts = np.asarray(yzpts)

    EWDxy = ES(xypts)
    EWDxz = ES(xzpts)
    EWDyz = ES(yzpts)

    EWDxy = EWDxy.reshape(D.shape[0], D.shape[1])
    EWDxz = EWDxz.reshape(D.shape[0], D.shape[2])
    EWDyz = EWDyz.reshape(D.shape[1], D.shape[2])

    title = "Ewald Corrected Structure Factor \n $\lambda=$"+str(wavelength_angstroms)+" $\AA$   $k_{ew}=$"+str(round(K_ES,2))+" $\AA^{-1}$"
    ltitle = 'log ' + title

    xlab = 'x (' + units + ")"
    ylab = 'y (' + units + ")"
    zlab = 'z (' + units + ")"

    fname = "Ewald_"

    plt.figure(1)
    plt.suptitle(title)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    EWDmax_xy = np.amax(EWDxy)
    plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], EWDxy, contours, vmax=cscale*EWDmax_xy, **kwargs)
    plt.savefig(path + fname + "xy" + format, dpi=DPI)
    plt.clf()

    plt.figure(2)
    plt.suptitle(ltitle)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    EWDmax_xylog = np.amax(np.log(EWDxy))
    plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], np.log(EWDxy), contours, vmax=lcscale*EWDmax_xylog, **kwargs)
    plt.savefig(path + fname + "xylog" + format, dpi=DPI)
    plt.clf()

    plt.figure(3)
    plt.suptitle(title)
    plt.xlabel(xlab)
    plt.ylabel(zlab)
    EWDmax_xz = np.amax(EWDxz)
    plt.contourf(D[:, 0, :, 0], D[:, 0, :, 2], EWDxz, contours, vmax=cscale*EWDmax_xz, **kwargs)
    plt.savefig(path + fname + "xz" + format, dpi=DPI)
    plt.clf()

    plt.figure(4)
    plt.suptitle(ltitle)
    plt.xlabel(xlab)
    plt.ylabel(zlab)
    EWDmax_xzlog = np.amax(np.log(EWDxz))
    plt.contourf(D[:, 0, :, 0], D[:, 0, :, 2], np.log(EWDxz), contours, vmax=lcscale*EWDmax_xzlog, **kwargs)
    lims = [np.amax(D[:, 0, :, 0]), np.amax(D[:, 0, :, 2])]
    qmax = min(lims)
    plt.xlim([-qmax, qmax])
    plt.ylim([-qmax, qmax])
    plt.savefig(path + fname + "xzlog" + format, dpi=DPI)
    plt.clf()

    plt.figure(5)
    plt.suptitle(title)
    plt.xlabel(ylab)
    plt.ylabel(zlab)
    EWDmax_yz = np.amax(EWDyz)
    plt.contourf(D[0, :, :, 1], D[0, :, :, 2], EWDyz, contours, vmax=cscale*EWDmax_yz, **kwargs)
    plt.savefig(path + fname + "yz" + format, dpi=DPI)
    plt.clf()

    plt.figure(6)
    plt.suptitle(ltitle)
    plt.xlabel(ylab)
    plt.ylabel(zlab)
    EWDmax_yzlog = np.amax(np.log(EWDyz))
    plt.contourf(D[0, :, :, 1], D[0, :, :, 2], np.log(EWDyz), contours, vmax=lcscale*EWDmax_yzlog, **kwargs)
    plt.savefig(path + fname + "yzlog" + format, dpi=DPI)
    plt.clf()
Example #28
def build_abdo_dataset(gpu_id=0):

    import astra
    astra.astra.set_gpu_index(gpu_id)

    data_path = Path('/data/maureen_shares/BoneMetastases_NIfTI_BodyRef')
    nii_filenames = sorted(data_path.glob('*.nii'), key=utils._nat_sort)

    save_path = Path('/data/fdelberghe/AbdoScans')

    scanner_params = FleX_ray_scanner()
    scanner_trajs = [
        astra_sim.create_scan_geometry(scanner_params,
                                       n_projs=1200,
                                       elevation=el) for el in [-15, 0, 15]
    ]

    theta_range = np.linspace(0,
                              np.pi,
                              int(np.round(np.sqrt(2) * 501)),
                              endpoint=False)

    interp_shape = (501, 501)

    for i, filename in enumerate(nii_filenames):
        if i <= 5: continue
        nii_file = nib.load(filename)
        input_volume = np.transpose(nii_file.get_fdata(), (2, 1, 0))[::-1]
        input_volume /= input_volume.max()

        # input_volume = 1/ (1+np.exp(-(input_volume -1600)*np.log(4)/400))
        # utils.save_vid(f'outputs/projections.avi', input_volume[::-1,...,None])

        # [x,y,z] scaling factors
        scaling = np.diagonal(nii_file.affine)[:3]
        volume_size = list(
            map(lambda size, scale: size * scale, input_volume.shape,
                scaling[::-1]))

        # interp the volume in a box the size of the largest axis
        max_in_size = max(volume_size) * 1.2

        # Creates grid center on volume center regardless of volume shape
        z_gr = np.linspace(-volume_size[0] / max_in_size,
                           volume_size[0] / max_in_size, input_volume.shape[0])
        x_gr, y_gr = [
            np.linspace(-volume_size[j] / max_in_size,
                        volume_size[j] / max_in_size, input_volume.shape[j])
            for j in range(1, 3)
        ]

        interp = RegularGridInterpolator((z_gr, x_gr, y_gr),
                                         input_volume,
                                         fill_value=0,
                                         bounds_error=False)

        z_gr = np.linspace(-1.0, 1.0, interp_shape[0])
        xy_gr = np.linspace(-1.0, 1.0, interp_shape[1])

        z_rad = np.vstack((z_gr, ) * interp_shape[1]).T

        save_folder = save_path / f'Volume{i+1}'
        save_folder.mkdir(parents=True, exist_ok=True)

        for j in trange(len(theta_range),
                        desc=f"Saving {save_folder.name}/CT_target"):
            x_rad = np.vstack(
                (xy_gr * np.cos(theta_range[j]), ) * interp_shape[0])
            y_rad = np.vstack(
                (xy_gr * -np.sin(theta_range[j]), ) * interp_shape[0])

            rad_slice = interp(
                np.vstack((z_rad.flatten(), x_rad.flatten(),
                           y_rad.flatten())).T).reshape(interp_shape)

            imsave((save_folder / f'CT_target_s{j+1:0>4d}.tif'),
                   rad_slice.astype('float32'))
        print('')

        vox_sz = scanner_params.source_origin_dist / (
            scanner_params.source_detector_dist / min(scanner_params.FoV) + .5)
        print(vox_sz)

        for orbit, scanner_traj in enumerate(scanner_trajs):  # avoid shadowing the file index i

            # ============================== #
            # [y,x,z] axis order for size, [x,y,z] for volume shape, why...?
            vol_geom = astra.creators.create_vol_geom(
                *np.transpose(input_volume, (1, 2, 0)).shape, *[
                    sign * volume_size[k] / 2 / vox_sz for k in [2, 1, 0]
                    for sign in [-1, 1]
                ])

            # [z,x,y] axis order for volume data
            proj_id = astra.data3d.create('-vol', vol_geom, data=input_volume)
            proj_geom = astra.create_proj_geom(
                'cone_vec', *scanner_params.detector_effective_size,
                scanner_traj)

            projections_id, projections = astra.creators.create_sino3d_gpu(
                proj_id, proj_geom, vol_geom)

            astra.data3d.delete(projections_id)

            # from [rows,proj_slc,cols] to [proj_slc,rows,cols]
            projections = np.transpose(projections, (1, 0, 2))
            # ============================== #

            # projections = astra_sim.create_CB_projection(input_volume, scanner_params, proj_vecs=scanner_traj, voxel_size=vox_sz, gpu_id=gpu_id)
            utils.save_vid(f'outputs/projections.avi', projections[..., None])
            reconstructed_volume = astra_sim.FDK_reconstruction(
                projections,
                scanner_params,
                proj_vecs=scanner_traj,
                voxel_size=vox_sz / 501,
                gpu_id=gpu_id)

            rad_slices_CB = radial_slice_sampling(reconstructed_volume,
                                                  theta_range)

            for j in trange(
                    len(theta_range),
                    desc=f"Saving {save_folder.name}/CB_source_orbit{orbit+1:0>2d}"
            ):
                imsave(
                    save_folder / f'CB_source_orbit{orbit+1:0>2d}_s{j+1:0>4d}.tif',
                    rad_slices_CB[j].astype('float32'))
            print('')
def PLOT_RAD_NEW(D, wavelength_angstroms, ucell, format=False, factor=3.1, **kwargs):

    """
    :param D: raw structure factor
    :param wavelength_angstroms: wavelength of X-ray (angstroms)
    :param ucell: 3 x 3 unitcell vectors
    :param factor: maximum colorbar value if using formatting from Coscia et al. manuscript
    :param format: plot simulated XRD patterns as they appear in the Coscia et al. manuscript
    :return:
    """

    if not os.path.exists(path):
        os.makedirs(path)

    # inverse_ft(D, ucell)

    X = D[:, 0, 0, 0]
    Y = D[0, :, 0, 1]
    Z = D[0, 0, :, 2]
    SF = D[..., 3]

    ############## Plot z-slice down the middle of the raw structure factor ###################
    # plt.plot(Z, SF[len(X)//2, len(Y)//2, :])
    # plt.xlabel('q$_z$ ($\AA^{-1}$)')
    # plt.ylabel('Intensity')
    # plt.savefig('z_section.png')
    # plt.show()
    # exit()

    ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)

    THETA_BINS_PER_INV_ANG = 20.
    MIN_THETA_BINS = 1  # minimum allowed bins
    RBINS = 400
    NLEVELS = 200  # number of levels for contour plots

    a1 = ucell[0]
    a2 = ucell[1]
    a3 = ucell[2]

    b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
    b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))
    b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))

    b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))
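    # b1, b2, b3 are the reciprocal-lattice basis vectors (crystallographic
    # convention, no 2*pi factor); b_inv maps Cartesian q-points onto the
    # fractional coordinates of the grid that SF is stored on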

    ZBINS = Z.shape[0]  # 400
    XR = (X[-1] - X[0])*ucell[0][0]
    YR = (Y[-1] - Y[0])*ucell[1][1]

    Rmax = min(XR, YR) / 2.0
    Rmax *= 0.95

    rarr, rspace = np.linspace(0.0, Rmax, RBINS, retstep=True)
    zar = np.linspace(Z[0], Z[-1], ZBINS)

    oa = np.zeros((rarr.shape[0], zar.shape[0]))
    circ = 2.*np.pi*rarr  # circumference

    for ir in trange(rarr.shape[0]):

        NTHETABINS = max(int(THETA_BINS_PER_INV_ANG*circ[ir]), MIN_THETA_BINS)  #calculate number of bins at this r
        thetas = np.linspace(0.0, np.pi*2.0, NTHETABINS, endpoint=False)  # generate theta array

        t, r, z = np.meshgrid(thetas, rarr[ir], zar)  # generate grid of cylindrical points

        xar = r*np.cos(t)  # set up x,y coords
        yar = r*np.sin(t)

        pts = np.vstack((xar.ravel(), yar.ravel(), z.ravel())).T  # reshape for interpolation

        MCpts = np.matmul(pts, b_inv)  # slower: MCpts = mc_inv(pts,ucell)

        oa[ir, :] = np.average(ES(MCpts).reshape(r.shape), axis=1)  # store average values in final array

    mn = np.nanmin(oa)
    oa = np.where(np.isnan(oa), mn, oa)

    if not format:
        rad_avg = np.average(oa)
        oa /= rad_avg  # normalize

    # set up data for contourf plot by making it symmetrical
    final = np.append(oa[::-1, :], oa[1:], axis=0)  # SF
    rfin = np.append(-rarr[::-1], rarr[1:])  # R
    zfin = np.append(z[:, 0, :], z[1:, 0, :], axis=0)  # Z

    unitlab = r'($\AA^{-1}$)'  # inverse angstroms

    logfinal = np.log(final)

    MIN = np.amin(final)  # MIN = np.amin(np.ma.masked_invalid(final))
    MAX = np.amax(final)  # MAX = np.amax(np.ma.masked_invalid(final))

    lvls = np.linspace(MIN, MAX, NLEVELS)

    if format:
        alkane_intensity = normalize_alkanes(rfin, zfin[0], final, 1.4, 1.57, 120)  # 1.4, 1.57
        final /= alkane_intensity  # normalize according to R-alkanes

    # lvls = np.linspace(0, factor, NLEVELS)  # contour levels
    rlimits = [np.argmin(np.abs(rfin + 2.5)), np.argmin(np.abs(rfin - 2.5))]
    zlimits = [np.argmin(np.abs(zfin[0] + 2.5)), np.argmin(np.abs(zfin[0] - 2.5))]

    MIN = np.amin(final[rlimits[0]:rlimits[1], zlimits[0]:zlimits[1]])
    MIN = 0.4
    # MAX = 7.67

    #lvls = np.linspace(np.log10(MIN), np.log10(MAX), NLEVELS)

    if format:

        cmap = 'jet'
        print(factor)
        lvls = np.linspace(0, factor, NLEVELS)
        lvls_log = np.linspace(np.log10(final[-1, -1]), np.log10(np.amax(final)), NLEVELS)

        # plot 1D SAXS
        plt.figure()
        plt.plot(rfin, final[:, zfin[0].shape[0]//2], linewidth=2)
        plt.xlabel(r'$q_r\ (\AA^{-1})$', fontsize=14)
        plt.ylabel('Intensity', fontsize=14)
        plt.tight_layout()

        plt.figure()

        heatmap = plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap=cmap, extend='max')
        cbar = plt.colorbar(heatmap)
        plt.xlabel(r'$q_r\ (\AA^{-1}$)', fontsize=18)
        plt.ylabel(r'$q_z\ (\AA^{-1}$)', fontsize=18)
        plt.gcf().get_axes()[0].set_ylim(-2.5, 2.5)
        plt.gcf().get_axes()[0].set_xlim(-2.5, 2.5)
        plt.gcf().get_axes()[0].tick_params(labelsize=14)
        plt.gcf().get_axes()[0].set_aspect('equal')
        plt.tight_layout()
        plt.savefig('rzplot.png')
        print('rzplot.png saved')

        ################# Q_R and Q_Z CROSS_SECTIONS OF R_PI WITH GAUSSIAN AND LORENTZIAN FITS ##################
        ############################### FIT TO QR CROSS-SECTION OF R-PI #########################
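        # `gaussian` and `lorentz` are assumed to be defined elsewhere in the
        # original module; sketches consistent with how they are called below:
        #   def gaussian(x, mean, sigma, amplitude, offset):
        #       return offset + amplitude * np.exp(-(x - mean)**2 / (2 * sigma**2))
        #   def lorentz(x, fwhm, center, amplitude):
        #       return amplitude * (0.5 * fwhm)**2 / ((x - center)**2 + (0.5 * fwhm)**2)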

        plt.figure()

        rpi_ndx = np.argmin(np.abs(zfin[0] - zfin[0][np.argmax(final[rfin.size // 2, :])]))

        plt.plot(rfin, final[:, rpi_ndx], linewidth=2, color='blue')  # it's xkcd:blue in the paper

        p = np.array([0, 0.3, 4, 1])
        solp, cov_x = curve_fit(gaussian, rfin, final[:, rpi_ndx], p,
                                bounds=((-np.inf, 0, 0, 0), (np.inf, np.inf, np.inf, np.inf)))

        plt.plot(rfin, gaussian(rfin, solp[0], solp[1], solp[2], solp[3]), '--', color='blue', label='Gaussian Fit',
                 linewidth=2)

        print("Gaussian FWHM = %.3f +/- %.3f A^-1" % (2*np.sqrt(2*np.log(2))*solp[1],
                                               2 * np.sqrt(2 * np.log(2)) * cov_x[1, 1] ** 0.5))

        p = np.array([0.1, 0, 4])
        solp_lorentz, cov_x = curve_fit(lorentz, rfin, final[:, rpi_ndx], p,
                                bounds=[[0, -np.inf, 0], [np.inf, np.inf, np.inf]])

        plt.plot(rfin, lorentz(rfin, solp_lorentz[0], solp_lorentz[1], solp_lorentz[2]), '--', label='Lorentzian Fit',
                 linewidth=2, color='orange')  # it's xkcd:orange in the paper

        print("Lorentzian FWHM = %.3f +/- %.3f A^-1" % (solp_lorentz[0], cov_x[0, 0] ** 0.5))

        plt.legend(fontsize=16)
        plt.xlabel(r'$q_r\ (\AA^{-1})$', fontsize=18)
        plt.ylabel('Intensity', fontsize=18)
        plt.gca().tick_params(labelsize=18)
        plt.tight_layout()
        #plt.savefig('/home/bcoscia/PycharmProjects/LLC_Membranes/Ben_Manuscripts/structure_paper/figures/sim_rsection_fit.pdf')

        ##################### FIT TO THE Q_Z CROSS-SECTION OF R-PI #####################
        plt.figure()

        rndx = rfin.size // 2  # q_r = 0 column
        zstart = zfin[0].size // 2  # fit only the positive-q_z half
        plt.plot(zfin[0][zstart:], final[rndx, zstart:], linewidth=2, color='blue')

        p = np.array([1.4, 0.1, 7, 0])  # initial guess: [mean, sigma, amplitude, offset]
        solp, cov_x = curve_fit(gaussian, zfin[0][zstart:], final[rndx, zstart:], p,
                                bounds=([-np.inf, 0, 0, 0], [np.inf, np.inf, np.inf, np.inf]))

        fine_grid = np.linspace(zfin[0][zstart], zfin[0][-1], 1000)  # dense grid so the fitted curves plot smoothly
        plt.plot(fine_grid, gaussian(fine_grid, solp[0], solp[1], solp[2], solp[3]), '--', color='blue', label='Gaussian Fit',
                 linewidth=2)

        print("Gaussian FWHM = %.3f +/- %.3f A^-1" % (2*np.sqrt(2*np.log(2))*solp[1],
                                               2 * np.sqrt(2 * np.log(2)) * cov_x[1, 1] ** 0.5))

        p = np.array([0.1, 0, 4])  # initial guess: [FWHM, center, amplitude]
        solp_lorentz, cov_x = curve_fit(lorentz, zfin[0][zstart:], final[rndx, zstart:], p,
                                bounds=[[0, -np.inf, 0], [np.inf, np.inf, np.inf]])

        plt.plot(fine_grid, lorentz(fine_grid, solp_lorentz[0], solp_lorentz[1], solp_lorentz[2]), '--',
                 label='Lorentzian Fit', linewidth=2, color='orange')

        print("Lorentzian FWHM = %.3f +/- %.3f A^-1" % (solp_lorentz[0], cov_x[0, 0] ** 0.5))

        plt.legend(fontsize=17)
        plt.xlabel(r'$q_z\ (\AA^{-1})$', fontsize=18)
        plt.ylabel('Intensity', fontsize=18)
        plt.gca().tick_params(labelsize=18)
        plt.tight_layout()
        #plt.savefig('/home/bcoscia/PycharmProjects/LLC_Membranes/Ben_Manuscripts/structure_paper/figures/sim_zsection_fit.pdf')

        print('Maximum R-pi intensity: %.2f' % np.amax(final[rfin.size // 2, :]))
        # print('Average R-spots intensity : %.2f' % Rspots(rfin, zfin[0], final.T, theta=30, theta_sigma=(1, 1),
        #                                                   bounds=(1.39, 1.49), cmap=cmap))

    else:

        plt.figure()
        plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap='jet')
        plt.colorbar()

        plt.title('S(r,z)')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)

        plt.savefig('new_rzplot.png')

        plt.figure()

        cs = plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap='jet', extend='both')
        cs.cmap.set_under('k')  # paint values below the lower limit black
        cs.set_clim(MIN, 0.1 * MAX)  # compress the color range so weak features remain visible
        plt.title('S(r,z)')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.colorbar()
        plt.savefig('cs.png')

        # same S(r,z) map as new_rzplot.png above, saved under a second filename
        plt.figure()

        plt.contourf(rfin, zfin[0], final.T, levels=lvls, cmap='jet')
        plt.colorbar()

        plt.title('S(r,z)')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.savefig('new_rzplot2.png')

        plt.figure()
        # log-scale version of the same S(r,z) map
        lglvls = np.linspace(np.amin(logfinal), np.amax(logfinal), NLEVELS)

        plt.contourf(rfin, zfin[0], logfinal.T, levels=lglvls, cmap='jet')
        plt.colorbar()

        plt.title('ln(S(r,z))')
        plt.xlabel('r ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.savefig('new_log_rzplot.png')

        plt.figure()

        # sample the interpolator on an x-z plane through y = 0
        x2 = np.linspace(-Rmax, Rmax, RBINS * 2 - 1)
        z2 = np.linspace(Z[0], Z[-1], RBINS)

        xg2, yg2, zg2 = np.meshgrid(x2, np.asarray(0), z2)
        pts = np.vstack((xg2.ravel(), yg2.ravel(), zg2.ravel())).T
        out2 = ES(pts).reshape(xg2.shape[1], xg2.shape[2])

        o2n = out2 / rad_avg  # normalize with the same radial average as S(r, z)

        plt.contourf(xg2[0, :, :], zg2[0, :, :], o2n, levels=lvls, cmap='jet')

        plt.xlabel('x ' + unitlab)
        plt.ylabel('z ' + unitlab)
        plt.title('S(x,z)|$_{y=0}$')

        plt.colorbar()
        plt.savefig('new_xzplot.png')

        plt.figure()

        # sample the interpolator on an x-y plane through z = 0
        x2 = np.linspace(-Rmax, Rmax, RBINS * 2 - 1)
        y2 = np.linspace(-Rmax, Rmax, RBINS * 2 - 1)

        xg2, yg2, zg2 = np.meshgrid(x2, y2, np.asarray(0))
        pts = np.vstack((xg2.ravel(), yg2.ravel(), zg2.ravel())).T
        out2 = ES(pts).reshape(xg2.shape[0], xg2.shape[1])

        o2n = out2 / np.average(out2)  # normalize the x-y slice by its own mean

        lvlsxy = np.linspace(np.amin(o2n), np.amax(o2n), NLEVELS)  # contour levels

        plt.contourf(xg2[:, :, 0], yg2[:, :, 0], o2n, levels=lvlsxy, cmap='jet')

        plt.xlabel('x ' + unitlab)
        plt.ylabel('y ' + unitlab)
        plt.title('S(x,y)')  # |$_{y=0}$')

        plt.colorbar()
        plt.savefig('new_xyplot.png')

        if False:  # disabled: difference map between S(r,z) and the x-z slice

            plt.figure()

            dif = o2n - final  # note: assumes o2n is the x-z slice with the same shape as final
            lvls2 = np.linspace(-0.4, 0.4, 100)

            plt.contourf(xg2[0, :, :], zg2[0, :, :], dif, levels=lvls2, cmap='seismic')
            plt.xlabel('x,r ' + unitlab)
            plt.ylabel('z ' + unitlab)
            plt.title('S(r,z)-S(x,z)|$_{y=0}$')

            plt.colorbar()
            plt.savefig('difference.png')

    plt.show()
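# The fits above rely on `gaussian` and `lorentz` model functions defined
# elsewhere in the module. A minimal sketch consistent with the initial guesses,
# bounds, and FWHM printouts used here (an assumption, not necessarily the
# authors' exact definitions):
def gaussian(x, mean, sigma, amplitude, offset):
    # FWHM = 2 * sqrt(2 * ln 2) * sigma, matching the printout above
    return amplitude * np.exp(-(x - mean) ** 2 / (2 * sigma ** 2)) + offset

def lorentz(x, fwhm, center, amplitude):
    # parameterized so that `fwhm` is reported directly as the Lorentzian FWHM
    return amplitude * (0.5 * fwhm) ** 2 / ((x - center) ** 2 + (0.5 * fwhm) ** 2)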
Example #30
0
 def test_linear_xi1d(self):
     points, values = self._get_sample_4d_2()
     interp = RegularGridInterpolator(points, values)
     sample = np.asarray([0.1, 0.1, 10., 9.])
     wanted = 1001.1
     assert_array_almost_equal(interp(sample), wanted)