def model(x, y):

        spline = SmoothBivariateSpline(
            width, eqPonA, factor, kx=2, ky=1)

        result = spline.ev(x, y)

        return result
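A minimal way to exercise this closure (a sketch, assuming width, eqPonA and factor are scattered 1-D sample arrays captured from the enclosing scope; kx=2, ky=1 fits a surface quadratic in the first coordinate and linear in the second):

import numpy as np
from scipy.interpolate import SmoothBivariateSpline

# Hypothetical stand-ins for the captured arrays.
rng = np.random.default_rng(0)
width = rng.uniform(1.0, 10.0, 50)
eqPonA = rng.uniform(0.5, 2.0, 50)
factor = 1.0 + 0.1 * width - 0.05 * eqPonA

spline = SmoothBivariateSpline(width, eqPonA, factor, kx=2, ky=1)
print(spline.ev(5.0, 1.2))  # pointwise evaluation, as in model(x, y)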
Example n. 2
def plot_interpolated_image(P, I, file_name='plots/img.png', pregrid=False, dim=50):

    plt.clf()
    
    print('plotting columns of matrix, shape:', I.shape)
    if (I.ndim == 1):
        I = I[:, None]
        f = 1
    else:
        f = int(np.sqrt(I.shape[1]))

    for i in range(f**2):

        ax = plt.subplot(f,f,i+1)
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)


        xlim = ylim = 0.9
        xi = np.linspace(-xlim, xlim, dim)
        yi = np.linspace(-ylim, ylim, dim)
        XI, YI = np.meshgrid(xi, yi)

        # grid the data.
        # contour the gridded data, plotting dots at the randomly spaced data points.

        if pregrid:
            # grid then spline smooth
            ZI = griddata((P[:, 0], P[:, 1]), I[:, i],
                          (xi[None, :], yi[:, None]),
                          method='linear',
                          fill_value=np.mean(I[:, i]))

            spline = SmoothBivariateSpline(XI.flatten(), YI.flatten(), ZI.flatten())

        else:
            spline = SmoothBivariateSpline(P[:, 0], P[:, 1], I[:, i])


        zi = spline.ev(XI.flatten(), YI.flatten())
        zi = np.reshape(zi, (dim,dim))

        # rbf = Rbf(XI, YI, zi, epsilon=1e-1)
        # zz = rbf(XI, YI)

        plt.imshow(zi, interpolation='nearest')

        #plt.contour(xi,yi,zi,10, linewidths=0.5,colors='k')
        #plt.contourf(xi,yi,zi,10, cmap=plt.cm.spectral)
        #ax.scatter(P[:,0], P[:,1], c='k', s=2, linewidths=0)
        #plt.xlim(-1, 1)
        #plt.ylim(-1, 1)

    plt.savefig(file_name)
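A hedged usage sketch for the function above, with hypothetical scattered points (assumes the imports the snippet relies on: matplotlib.pyplot as plt, numpy as np, and scipy.interpolate's griddata and SmoothBivariateSpline):

import numpy as np

rng = np.random.default_rng(0)
P = rng.uniform(-0.9, 0.9, size=(300, 2))      # (x, y) sample positions
I = np.cos(2 * P[:, 0]) * np.sin(3 * P[:, 1])  # 1-D -> a single panel

plot_interpolated_image(P, I, file_name='img.png', pregrid=True, dim=50)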
Example n. 3
    def model(x, y):
        bbox = [
            np.min([np.min(width), np.min(x)]),
            np.max([np.max(width), np.max(x)]),
            np.min([np.min(eqPonA), np.min(y)]),
            np.max([np.max(eqPonA), np.max(y)])]

        spline = SmoothBivariateSpline(
            width, eqPonA, factor, kx=2, ky=1, bbox=bbox)

        result = spline.ev(x, y)

        return result
Example n. 4
def genShiftVectorFieldSpline(nx,ny, nsx, nsy, err_sx, err_sy, bbox=None):
    '''interpolates shift vectors using smoothing splines'''
    wonky = findWonkyVectors(nx, ny, nsx, nsy, tol=2*err_sx.mean())
    #wonky = findWonkyVectors(nx, ny, nsx, nsy, tol=100)
    good = wonky == 0

    print('%d wonky vectors found and discarded' % wonky.sum())
    
    if bbox:
        spx = SmoothBivariateSpline(nx[good], ny[good], nsx[good], 1./err_sx[good], bbox=bbox)
        spy = SmoothBivariateSpline(nx[good], ny[good], nsy[good], 1./err_sy[good], bbox=bbox)
    else:
        spx = SmoothBivariateSpline(nx[good], ny[good], nsx[good], 1./err_sx[good])
        spy = SmoothBivariateSpline(nx[good], ny[good], nsy[good], 1./err_sy[good])

    X, Y = np.meshgrid(np.arange(0, 512*70, 100), np.arange(0, 256*70, 100))

    dx = spx.ev(X.ravel(),Y.ravel()).reshape(X.shape)
    dy = spy.ev(X.ravel(),Y.ravel()).reshape(X.shape)

    return (dx.T, dy.T, spx, spy, good)
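The w=1/err weighting above makes the smoothing criterion trust precise vectors more; a standalone sketch of the same idea with hypothetical data:

import numpy as np
from scipy.interpolate import SmoothBivariateSpline

rng = np.random.default_rng(1)
x = rng.uniform(0, 512 * 70, 200)
y = rng.uniform(0, 256 * 70, 200)
err = rng.uniform(0.5, 2.0, 200)                  # per-vector uncertainty
shift = 0.01 * x - 0.02 * y + rng.normal(0, err)  # noisy shift field

sp = SmoothBivariateSpline(x, y, shift, w=1. / err)
X, Y = np.meshgrid(np.arange(0, 512 * 70, 100), np.arange(0, 256 * 70, 100))
dx = sp.ev(X.ravel(), Y.ravel()).reshape(X.shape)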
Example n. 5
    gm = normal_oned(x, 0.0, a, disps)
    return gm


fn = "gauss_gal_results/sersic_mog_model.smooth=0.0075.h5"

with h5py.File(fn, "r") as data:
    n = data["nsersic"][:]
    r = data["rh"][:]
    A = data["amplitudes"][:]
    smoothing = data.attrs["smoothing"]
    radii = data["radii"][:]
    xx = data["x"][:]

nm, ng = A.shape
splines = [SmoothBivariateSpline(n, r, A[:, i], s=None) for i in range(ng)]

# --- look at the splines in rh for a given n ---
if True:

    def show_splines(ncheck=2.0):
        choose = (n == ncheck)
        cfig, caxes = pl.subplots(3, 3)
        cfig.suptitle('n={:3.1f}'.format(ncheck))
        for gind in range(ng):
            cax = caxes.flat[gind]
            cax.plot(r[choose], A[choose, gind], 'o', label='fitted')
            cax.plot(r[choose],
                     np.squeeze(splines[gind](ncheck, r[choose])),
                     label='spline')
        return cfig
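The np.squeeze above is needed because calling the spline object defaults to grid evaluation; a small sketch contrasting the two call styles on a throwaway spline:

import numpy as np
from scipy.interpolate import SmoothBivariateSpline

rng = np.random.default_rng(2)
xs, ys = rng.uniform(0, 1, 100), rng.uniform(0, 1, 100)
sp = SmoothBivariateSpline(xs, ys, xs + ys)

rq = np.linspace(0.1, 0.9, 5)
print(sp(0.5, rq).shape)    # (1, 5): outer-product grid, hence np.squeeze
print(sp.ev(0.5, rq).shape)  # (5,): pointwise, broadcasting the scalar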
Example n. 6
#%% SYSTEM SOLUTION
UG = np.linalg.solve(KG, RHSG)
if not np.allclose(np.dot(KG, UG), RHSG):
    print("The system is not in equilibrium!")



#%% POST-PROCESSING
UC = pos.complete_disp(IBC, nodes, UG)
pos.plot_disp(UC, nodes, elements)

UU = pos.scatter(DME, UG, ne, neq, elements)
x = nodes[:, 1]
y = nodes[:, 2]
E_gauss, pts_gauss = pos.strainGLO(IELCON, UU, ne, COORD, elements)
E_int1 = SmoothBivariateSpline(pts_gauss[:, 0], pts_gauss[:, 1],
                               E_gauss[:, 0])
E_int2 = SmoothBivariateSpline(pts_gauss[:, 0], pts_gauss[:, 1],
                               E_gauss[:, 1])
E_int3 = SmoothBivariateSpline(pts_gauss[:, 0], pts_gauss[:, 1],
                               E_gauss[:, 2])
E1 = E_int1.ev(x, y)
E2 = E_int2.ev(x, y)
E3 = E_int3.ev(x, y)
E_nodes = np.column_stack([E1, E2, E3])
pos.plot_strain(E_nodes, nodes, elements, plt_type="pcolor")
tri = pos.mesh2tri(nodes, elements)
plt.triplot(tri, color='k', alpha=0.3)

plt.show()

Example n. 7
    def calculate_mels(self,
                       selected,
                       e1,
                       e2,
                       R,
                       grid,
                       area,
                       superposition='potential',
                       xc='LDA'):
        """ Perform integration for selected H and S integrals.

        parameters:
        -----------
        selected: list of [('dds', '3d', '4d'), (...)]
        e1: <bra| element
        e2: |ket> element
        R: e1 is at origin, e2 at z=R
        grid: list of grid points on (d, z)-plane
        area: d-z areas of the grid points.
        superposition: 'density' or 'potential' superposition scheme
        xc: exchange-correlation functional (see description in self.run())

        return:
        -------
        List of H,S and H2 for selected integrals. In the potential
        superposition scheme, H2 is calculated using a different technique
        and can be used for error estimation. This is not available
        for the density superposition scheme, where simply H2=0 is returned.

        S: simply R1 * R2 * angle_part

        H: operate (derivate) R2 <R1 | t + Veff - Conf1 - Conf2 | R2>.
           With potential superposition: Veff = Veff1 + Veff2
           With density superposition: Veff = Vxc(n1 + n2)

        H2: operate with full h2 and hence use eigenvalue of | R2>
            with full Veff2:
              <R1 | (t1 + Veff1) + Veff2 - Conf1 - Conf2 | R2>
            = <R1 | h1 + Veff2 - Conf1 - Conf2 | R2> (operate with h1 on left)
            = <R1 | e1 + Veff2 - Conf1 - Conf2 | R2>
            = e1 * S + <R1 | Veff2 - Conf1 - Conf2 | R2>
            -> H and H2 can be compared and error estimated
        """
        self.timer.start('calculate_mels')
        Sl, Hl, H2l = np.zeros(10), np.zeros(10), np.zeros(10)

        # common for all integrals (not wf-dependent parts)
        self.timer.start('prelude')
        N = len(grid)
        x = grid[:N, 0]
        y = grid[:N, 1]
        r1 = np.sqrt(x**2 + y**2)
        r2 = np.sqrt(x**2 + (R - y)**2)
        t1 = np.arccos(y / r1)
        t2 = np.arccos((y - R) / r2)
        radii = np.array([r1, r2]).T
        gphi = g(t1, t2).T

        if superposition == 'potential':
            self.timer.start('vrho')
            v1 = e1.effective_potential(r1) - e1.confinement(r1)
            v2 = e2.effective_potential(r2) - e2.confinement(r2)
            veff = v1 + v2
            self.timer.stop('vrho')
        elif superposition == 'density':
            self.timer.start('vrho')
            rho = e1.electron_density(r1) + e2.electron_density(r2)
            veff = e1.nuclear_potential(r1) + e1.hartree_potential(r1)
            veff += e2.nuclear_potential(r2) + e2.hartree_potential(r2)
            if xc in ['LDA', 'PW92']:
                xc = XC_PW92()
                veff += xc.vxc(rho)
                self.timer.stop('vrho')
            else:
                xc = LibXC(xc)
                drho1 = e1.electron_density(r1, der=1)
                drho2 = e2.electron_density(r2, der=1)
                grad_x = drho1 * np.sin(t1)
                grad_x += drho2 * np.sin(t2)
                grad_y = drho1 * np.cos(t1)
                grad_y += drho2 * np.cos(t2)
                sigma = grad_x**2 + grad_y**2  # |grad rho|**2
                out = xc.compute_all(rho, sigma)
                veff += out['vrho']
                self.timer.stop('vrho')
                self.timer.start('vsigma')
                # add gradient corrections to vxc
                # provided that we have enough points
                # (otherwise we get "dfitpack.error:
                # (m>=(kx+1)*(ky+1)) failed for hidden m")
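                # (The spline x/y-derivatives below approximate the GGA
                # divergence term: v_xc = df/drho - 2*div(df/dsigma * grad rho);
                # out['vrho'] supplied the local df/drho part above.)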
                if out['vsigma'] is not None and len(x) > 16:
                    splx = SmoothBivariateSpline(x, y, out['vsigma'] * grad_x)
                    sply = SmoothBivariateSpline(x, y, out['vsigma'] * grad_y)
                    veff += -2. * splx(x, y, dx=1, dy=0, grid=False)
                    veff += -2. * sply(x, y, dx=0, dy=1, grid=False)
                self.timer.stop('vsigma')

        assert np.shape(gphi) == (N, 10)
        assert np.shape(radii) == (N, 2)
        assert np.shape(veff) == (N, )
        self.timer.stop('prelude')

        # calculate all selected integrals
        for integral, nl1, nl2 in selected:
            index = integrals.index(integral)
            S, H, H2 = 0., 0., 0.
            l2 = angular_momentum[nl2[1]]

            nA = len(area)
            r1 = radii[:nA, 0]
            r2 = radii[:nA, 1]
            d, z = grid[:nA, 0], grid[:nA, 1]
            aux = gphi[:nA, index] * area * d
            Rnl1 = e1.Rnl(r1, nl1)
            Rnl2 = e2.Rnl(r2, nl2)
            ddunl2 = e2.unl(r2, nl2, der=2)

            S = np.sum(Rnl1 * Rnl2 * aux)
            H = np.sum(Rnl1 * (-0.5 * ddunl2 / r2 + (veff + \
                       l2 * (l2 + 1) / (2 * r2 ** 2)) * Rnl2) * aux)

            if superposition == 'potential':
                H2 = np.sum(Rnl1 * Rnl2 * aux * (v2 - e1.confinement(r1)))
                H2 += e1.get_epsilon(nl1) * S
            elif superposition == 'density':
                H2 = 0

            Sl[index] = S
            Hl[index] = H
            H2l[index] = H2

        self.timer.stop('calculate_mels')
        return Sl, Hl, H2l
Example n. 8
def create_rotor_functions():

    #loading data
    filename = '/home/flowlab/PJ/FLORISSE3D/doc/BEST_DATA.txt'
    opened = open(filename)
    data = np.loadtxt(opened)
    "ratedPower, rotorDiameter, ratedQ, blade_mass, Vrated, I1, I2, I3, ratedT, extremeT"
    ratedPower = data[:, 0]
    rotorDiameter = data[:, 1]
    ratedQ = data[:, 2]
    blade_mass = data[:, 3]
    Vrated = data[:, 4]
    I1 = data[:, 5]
    I2 = data[:, 6]
    I3 = data[:, 7]
    ratedT = data[:, 8]
    extremeT = data[:, 9]

    ratedPower = ratedPower / max(ratedPower)
    rotorDiameter = rotorDiameter / max(rotorDiameter)
    ratedQ = ratedQ / max(ratedQ)
    blade_mass = blade_mass / max(blade_mass)
    Vrated = Vrated / max(Vrated)
    I1 = I1 / max(I1)
    I2 = I2 / max(I2)
    I3 = I3 / max(I3)
    ratedT = ratedT / max(ratedT)
    extremeT = extremeT / max(extremeT)

    w = np.ones(len(ratedPower)) * 2.
    order = 2

    interp_spline_ratedQ = SmoothBivariateSpline(ratedPower,
                                                 rotorDiameter,
                                                 ratedQ,
                                                 w,
                                                 kx=order,
                                                 ky=order)
    interp_spline_blade_mass = SmoothBivariateSpline(ratedPower,
                                                     rotorDiameter,
                                                     blade_mass,
                                                     w,
                                                     kx=order,
                                                     ky=order)
    interp_spline_Vrated = SmoothBivariateSpline(ratedPower,
                                                 rotorDiameter,
                                                 Vrated,
                                                 w,
                                                 kx=order,
                                                 ky=order)
    interp_spline_I1 = SmoothBivariateSpline(ratedPower,
                                             rotorDiameter,
                                             I1,
                                             w,
                                             kx=order,
                                             ky=order)
    interp_spline_I2 = SmoothBivariateSpline(ratedPower,
                                             rotorDiameter,
                                             I2,
                                             w,
                                             kx=order,
                                             ky=order)
    interp_spline_I3 = SmoothBivariateSpline(ratedPower,
                                             rotorDiameter,
                                             I3,
                                             w,
                                             kx=order,
                                             ky=order)
    interp_spline_ratedT = SmoothBivariateSpline(ratedPower,
                                                 rotorDiameter,
                                                 ratedT,
                                                 w,
                                                 kx=order,
                                                 ky=order)
    interp_spline_extremeT = SmoothBivariateSpline(ratedPower,
                                                   rotorDiameter,
                                                   extremeT,
                                                   w,
                                                   kx=order,
                                                   ky=order)

    return interp_spline_ratedQ, interp_spline_blade_mass, interp_spline_Vrated, interp_spline_I1, interp_spline_I2, interp_spline_I3, interp_spline_ratedT, interp_spline_extremeT
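A hedged usage sketch (assumes BEST_DATA.txt is available at the hard-coded path): the returned splines expect inputs normalized by the same column maxima used above.

splines = create_rotor_functions()
interp_spline_ratedQ = splines[0]

q_grid = interp_spline_ratedQ(0.5, 0.8)       # grid evaluation, shape (1, 1)
q_point = interp_spline_ratedQ.ev(0.5, 0.8)   # pointwise evaluation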
Example n. 9
def main(datafiles):
    skafta = {}
    skafta['ul_polstr'] = [1294500., -2489500.]
    skafta['lr_polstr'] = [1298500., -2493500.]
    skafta['mask_val'] = 1

    data = Data(datadic)
    data.read_data()

    ### convert to logical
    if skafta['mask_val'] == 1:
        data.mask_logic = data.mask < 0.5
    else:
        data.mask_logic = data.mask > 0.5
    ### mask null values in data
    null_mask = data.dem > 0.
    data.mask_logic *= null_mask

    ### get row and column for Skafta
    #ul_row = np.int(np.abs(data.hdr['ulx'] - skafta['ul_polstr'][0]) / data.hdr['spx'])
    #ul_col = np.int(np.abs(data.hdr['uly'] - skafta['ul_polstr'][1]) / data.hdr['spy'])
    #lr_row = np.int(np.abs(data.hdr['ulx'] - skafta['lr_polstr'][0]) / data.hdr['spx'])
    #lr_col = np.int(np.abs(data.hdr['uly'] - skafta['lr_polstr'][1]) / data.hdr['spy'])
    ul_row = 948
    ul_col = 2791
    lr_row = 2851
    lr_col = 5126

    ### cut out Skafta
    data.dem_skafta = data.dem[ul_row:lr_row, ul_col:lr_col]
    data.mask_logic_skafta = data.mask_logic[ul_row:lr_row, ul_col:lr_col]
    #skafta_shape = data.dem_skafta.shape

    data.skafta_gridx = np.empty_like(data.dem_skafta)
    data.skafta_gridy = np.empty_like(data.dem_skafta)
    for i in range(data.skafta_gridx.shape[0]):
        data.skafta_gridx[i, :] = skafta['ul_polstr'][0] + np.arange(
            data.dem_skafta.shape[1]) * data.hdr['spx']
    for i in range(data.skafta_gridx.shape[1]):
        data.skafta_gridy[:, i] = skafta['ul_polstr'][1] - np.arange(
            data.dem_skafta.shape[0]) * data.hdr['spy']

    data.dem_skafta_mask = data.dem_skafta.flatten()[
        data.mask_logic_skafta.flatten()]
    data.skafta_gridx_mask = data.skafta_gridx.flatten()[
        data.mask_logic_skafta.flatten()]
    data.skafta_gridy_mask = data.skafta_gridy.flatten()[
        data.mask_logic_skafta.flatten()]
    #
    # ## Optional: write xyz file for use with GMT surface
    if False:
        decfac_xyz = 50
        xyzout = np.empty((len(data.dem_skafta_mask[::decfac_xyz]), 3),
                          dtype=np.float32)
        xyzout[:, 0] = data.skafta_gridx_mask[::decfac_xyz]
        xyzout[:, 1] = data.skafta_gridy_mask[::decfac_xyz]
        xyzout[:, 2] = data.dem_skafta_mask[::decfac_xyz]
        np.savetxt(datadic['output_skafta_xyz'],
                   xyzout,
                   fmt='%10.8f',
                   delimiter='   ')

    ### first attempt: unsatisfactory due to artifacts with both linear and cubic methods, likely due to the piecewise nature of gridding
    # data.dem_skafta_filled = griddata((data.skafta_gridx_mask,data.skafta_gridy_mask),data.dem_skafta_mask,
    #             (data.skafta_gridx,data.skafta_gridy),method='cubic')
    # data.dem[ul_row:lr_row,ul_col:lr_col] = data.dem_skafta_filled
    # #data.dem[~null_mask] = -9999.

    ### second stab using global spline
    ### need to decimate

    print('defining spline function')
    decfac = 20
    sporder = 5

    data.dem_skafta_dec = data.dem_skafta[::-decfac, ::decfac].flatten()
    data.mask_skafta_dec = (data.mask_logic_skafta[::-decfac, ::decfac]
                            .flatten().astype(np.float32) + np.finfo(np.float64).eps)
    data.skafta_gridx_dec = data.skafta_gridx[::-decfac, ::decfac].flatten()
    data.skafta_gridy_dec = data.skafta_gridy[::-decfac, ::decfac].flatten()

    data.spline_fun = SmoothBivariateSpline(x=data.skafta_gridx_dec,
                                            y=data.skafta_gridy_dec,
                                            z=data.dem_skafta_dec,
                                            w=data.mask_skafta_dec,
                                            kx=sporder,
                                            ky=sporder,
                                            s=750)
    #print(data.spline_fun)

    print('done defining spline function...interpolating')
    data.dem_skafta_filled = data.spline_fun.ev(data.skafta_gridx,
                                                data.skafta_gridy)
    data.dem_diff = data.dem_skafta - data.dem_skafta_filled

    data.dem_skafta_filled_ddx = data.spline_fun.ev(
        data.skafta_gridx, data.skafta_gridy, dx=1) / data.hdr['spx']
    data.dem_skafta_filled_ddy = data.spline_fun.ev(
        data.skafta_gridx, data.skafta_gridy, dy=1) / data.hdr['spy']
    data.dem_skafta_filled_ddx2 = data.spline_fun.ev(
        data.skafta_gridx, data.skafta_gridy, dx=2) / data.hdr['spx']**2
    data.dem_skafta_filled_ddy2 = data.spline_fun.ev(
        data.skafta_gridx, data.skafta_gridy, dy=2) / data.hdr['spy']**2
    data.dem_skafta_filled_ddxdy = data.spline_fun.ev(
        data.skafta_gridx, data.skafta_gridy, dx=1,
        dy=1) / (data.hdr['spx'] * data.hdr['spy'])

    data.dem_skafta_filled_slope = np.sqrt(data.dem_skafta_filled_ddx**2 +
                                           data.dem_skafta_filled_ddy**2)

    data.dem_skafta_filled_laplacian = data.dem_skafta_filled_ddx2 + data.dem_skafta_filled_ddy2
    ### calculate mean curvature...see "Surfaces in 3D space" at https://en.wikipedia.org/wiki/Mean_curvature
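    ### in components: H = ((1 + z_x**2) z_yy - 2 z_x z_y z_xy + (1 + z_y**2) z_xx)
    ###                    / (2 (1 + z_x**2 + z_y**2)**1.5)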
    data.dem_skafta_filled_curvature = 0.5 * (
        (1. + data.dem_skafta_filled_ddx**2) * data.dem_skafta_filled_ddy2 -
        2. * data.dem_skafta_filled_ddx * data.dem_skafta_filled_ddy *
        data.dem_skafta_filled_ddxdy +
        (1. + data.dem_skafta_filled_ddy**2) * data.dem_skafta_filled_ddx2) / (
            1. + data.dem_skafta_filled_ddx**2 +
            data.dem_skafta_filled_ddy**2)**1.5

    data.dem[ul_row:lr_row, ul_col:lr_col] = data.dem_skafta_filled
    data.slope = 0. * data.dem
    data.slope[ul_row:lr_row, ul_col:lr_col] = data.dem_skafta_filled_slope
    data.laplacian = 0. * data.dem
    data.laplacian[ul_row:lr_row,
                   ul_col:lr_col] = data.dem_skafta_filled_laplacian
    data.curvature = 0. * data.dem
    data.curvature[ul_row:lr_row,
                   ul_col:lr_col] = data.dem_skafta_filled_curvature
    data.ddx2 = 0. * data.dem
    data.ddx2[ul_row:lr_row, ul_col:lr_col] = data.dem_skafta_filled_ddx2
    data.ddy2 = 0. * data.dem
    data.ddy2[ul_row:lr_row, ul_col:lr_col] = data.dem_skafta_filled_ddy2
    data.ddxdy = 0. * data.dem
    data.ddxdy[ul_row:lr_row, ul_col:lr_col] = data.dem_skafta_filled_ddxdy

    print('writing')
    with open(datadic['output_dem'], 'w') as fid:
        data.dem.flatten().astype(np.float32).tofile(fid)
    with open(datadic['output_slope'], 'w') as fid:
        data.slope.flatten().astype(np.float32).tofile(fid)
    with open(datadic['output_laplacian'], 'w') as fid:
        data.laplacian.flatten().astype(np.float32).tofile(fid)
    with open(datadic['output_curvature'], 'w') as fid:
        data.curvature.flatten().astype(np.float32).tofile(fid)
    with open(datadic['output_ddx2'], 'w') as fid:
        data.ddx2.flatten().astype(np.float32).tofile(fid)
    with open(datadic['output_ddy2'], 'w') as fid:
        data.ddy2.flatten().astype(np.float32).tofile(fid)
    with open(datadic['output_ddxdy'], 'w') as fid:
        data.ddxdy.flatten().astype(np.float32).tofile(fid)

    if True:
        data.dem[:, :] = 0.
        data.dem[ul_row:lr_row, ul_col:lr_col] = data.dem_diff
        with open(datadic['output_diff'], 'w') as fid:
            data.dem.flatten().astype(np.float32).tofile(fid)
Example n. 10
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 18:41:47 2013

@author: Nathan
"""
import matplotlib.pyplot as plt
from scipy.interpolate import SmoothBivariateSpline
from scipy import optimize as opt
from scipy.interpolate import UnivariateSpline, interp1d
import math
import numpy as np
import itertools
import collections
import logging

lookup = "C:\\Users\\Nathan\\Desktop\\CAR sync\\Buckeye_Current\\python\\bike_optimization\\test_in\\Lookup Files\\Emrax_eff.csv"

n = np.loadtxt(lookup, dtype=str, delimiter=',', skiprows=1)
x = n[:, 0].astype(float)
y = n[:, 1].astype(float)
z = n[:, 2].astype(float)
f = SmoothBivariateSpline(x, y, z)

xnew = np.arange(0, 5000, 20)
ynew = np.arange(0, 250)
znew = f(xnew, ynew)
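# f(xnew, ynew) evaluates on the outer-product grid, so znew has shape
# (len(xnew), len(ynew)); znew[:, 0] below is the slice at ynew[0] = 0.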
plt.plot(x, z, 'ro-', xnew, znew[:, 0], 'b-')
plt.show()
Example n. 11
        def process(self):

            if not self.inputs['Vertices'].is_linked:
                return

            if not self.outputs['Vertices'].is_linked:
                return

            vertices_s = self.inputs['Vertices'].sv_get()
            points_s = self.inputs['GridPoints'].sv_get()
            smooth_s = self.inputs['Smooth'].sv_get()
            degree_s = self.inputs['Degree'].sv_get()
            weights_s = self.inputs['Weights'].sv_get(default=[[1.0]])
            matrices_s = self.inputs['Matrix'].sv_get(default=[[Matrix()]])

            verts_out = []
            edges_out = []
            faces_out = []
            for vertices, weights, degree, matrix, smooth, grid_points in zip_long_repeat(
                    vertices_s, weights_s, degree_s, matrices_s, smooth_s,
                    points_s):
                if isinstance(grid_points, (list, tuple)):
                    grid_points = grid_points[0]
                if isinstance(degree, (list, tuple)):
                    degree = degree[0]
                if isinstance(smooth, (list, tuple)):
                    smooth = smooth[0]
                if isinstance(matrix, list):
                    matrix = matrix[0]
                has_matrix = matrix is not None and matrix != Matrix()

                fullList(weights, len(vertices))

                smooth = smooth * len(vertices)

                XYZ = np.array(vertices)
                if has_matrix:
                    np_matrix = np.array(matrix.to_3x3())
                    inv_matrix = np.linalg.inv(np_matrix)
                    #print(matrix)
                    #print(XYZ)
                    translation = np.array(matrix.translation)
                    XYZ = np.matmul(inv_matrix, XYZ.T).T + translation
                if self.orientation == 'X':
                    reorder = np.array([1, 2, 0])
                    XYZ = XYZ[:, reorder]
                elif self.orientation == 'Y':
                    reorder = np.array([2, 0, 1])
                    XYZ = XYZ[:, reorder]
                else:  # Z
                    pass

                x_min = XYZ[:, 0].min()
                x_max = XYZ[:, 0].max()
                y_min = XYZ[:, 1].min()
                y_max = XYZ[:, 1].max()
                xi = np.linspace(x_min, x_max, grid_points)
                yi = np.linspace(y_min, y_max, grid_points)
                XI, YI = np.meshgrid(xi, yi)

                spline = SmoothBivariateSpline(XYZ[:, 0],
                                               XYZ[:, 1],
                                               XYZ[:, 2],
                                               kx=degree,
                                               ky=degree,
                                               w=weights,
                                               s=smooth)
                ZI = spline(xi, yi)

                if self.orientation == 'X':
                    YI, ZI, XI = XI, YI, ZI
                elif self.orientation == 'Y':
                    ZI, XI, YI = XI, YI, ZI
                else:  # Z
                    pass

                new_verts = np.dstack((YI, XI, ZI))
                if has_matrix:
                    new_verts = new_verts - translation
                    new_verts = np.apply_along_axis(lambda v: np_matrix @ v, 2,
                                                    new_verts)
                new_verts = new_verts.tolist()
                new_verts = sum(new_verts, [])
                new_edges = self.make_edges(grid_points)
                new_faces = self.make_faces(grid_points)
                verts_out.append(new_verts)
                edges_out.append(new_edges)
                faces_out.append(new_faces)

            self.outputs['Vertices'].sv_set(verts_out)
            self.outputs['Edges'].sv_set(edges_out)
            self.outputs['Faces'].sv_set(faces_out)
Example n. 12
    def resample(self,raster,method,maxDist=1500,statistic='mean'):
        #if we are just block averaging, use myBinnedSamples to add to the raster where it was nan
        if (method == ResampleMethods.BlockAvg):
            myBinnedSamples, yedges, xedges, myBinNums = stats.binned_statistic_2d(self.y, self.x, self.z,
                                                                                   statistic=statistic,
                                                                                   bins=[raster.yBinEdges,
                                                                                         raster.xBinEdges],
                                                                                   expand_binnumbers=True)
            raster.z = np.where(np.isnan(raster.z),myBinnedSamples,raster.z)
        #otherwise things are a little more complicated!
        else:
            #determine which points in this Samples should be excluded as already covered in the raster
            myBinnedSamples, xedges, yedges, myBinNums = stats.binned_statistic_2d(self.x, self.y, self.z,
                                                                                   statistic='mean',
                                                                                   bins=[raster.xBinEdges,
                                                                                         raster.yBinEdges],
                                                                                   expand_binnumbers=True)
            
            # swap x/y so that y is dimension 0 and x is dimension 1, consistent with plotting convention and meshgrid
            # annoyingly, myBinNums holds indices into the edge arrays, not the output myBinnedSamples array,
            # with out-of-bounds data landing in the outer cells of the edge arrays;
            # clip this off, re-index, and mark out-of-bounds data with a bin number of -1
            myBinNums = myBinNums - 1
            x_inds = myBinNums[0, :]
            y_inds = myBinNums[1, :]
            internalBins = np.where(
                (x_inds >= 0) & (y_inds >= 0) & (x_inds < len(xedges) - 1) & (y_inds < len(yedges) - 1))
            internalBinNums = -1 * np.ones(self.z.shape, dtype='int64')
            # re-ravel the 2D index, however need to use column-major order for consistency in the layout of raster.z
            internalBinNums[internalBins] = np.ravel_multi_index((x_inds[internalBins], y_inds[internalBins]),
                                                           (len(xedges)-1, len(yedges)-1),order='F')
            # internalBinNums=np.delete(internalBinNums,np.where(internalBinNums==-1))

            rasterBinNums = np.where(~np.isnan(raster.z.ravel()))[0]
            requiredSampleBins = np.setdiff1d(internalBinNums,rasterBinNums)
            requiredSamplesIndexes = np.where(np.in1d(internalBinNums,requiredSampleBins))
            requiredSamplesX = self.x[requiredSamplesIndexes]
            requiredSamplesY = self.y[requiredSamplesIndexes]
            requiredSamplesZ = self.z[requiredSamplesIndexes]
            print('Using %i SamplePoints to fill %i raster points within the SamplePoints boundary' % (np.size(requiredSamplesX), np.size(requiredSampleBins)))
            #now that we have the 'new' samples provided by this Sample object (excluding those already covered by the raster)
            #bring in the raster's samples so that edges between datasets stay consistent (smooth/linear transitions) and are included in the kriging/interpolation process
            combinedSampleSet = raster.getSamples()
            combinedSampleSet.appendSamples(requiredSamplesX,requiredSamplesY,requiredSamplesZ)

            # import pylab as pl
            # fig = pl.figure(figsize=(10, 10))
            # ax = fig.add_subplot(111)
            # margin = 250
            # x_min, y_min, x_max, y_max = raster.bbox
            # ax.set_xlim([x_min - margin, x_max + margin])
            # ax.set_ylim([y_min - margin, y_max + margin])
            # #pl.pcolor(raster.x-raster.resolution/2, raster.y-raster.resolution/2, raster.z, vmin=-20, vmax=0)
            # pl.pcolor(xedges,yedges,myBinnedSamples.transpose(),vmin=-20,vmax=0)
            # pl.scatter(requiredSamplesX, requiredSamplesY, 80, requiredSamplesZ, 's', vmin=-20, vmax=0)
            # pl.scatter(combinedSampleSet.x, combinedSampleSet.y, 80, combinedSampleSet.z, '.', vmin=-20, vmax=0)
            # pl.show()
            
            #Work out which points in the raster fall within the boundary of this Samples object and should be filled by the object's samples
            try:
                boundaryType = self.boundaryType
                x=raster.x.ravel()
                y=raster.y.ravel()
                z=raster.z.copy().ravel()
                bbox=self.getBoundingBox()
                if (self.boundaryType == BoundaryPolygonType.Box):
                    inds = (x > bbox[0]) & (y > bbox[1]) & (x < bbox[2]) & (y < bbox[3]) & (np.isnan(z))
                elif (self.boundary.type == 'Polygon'):
                    # matplotlib path has function for array of points, shapely Polygon only seems to work on individual points...
                    boundaryPath = path.Path(np.stack(self.boundary.exterior.xy).transpose())
                    inds = (x > bbox[0]) & (y > bbox[1]) & (x < bbox[2]) & (y < bbox[3]) & boundaryPath.contains_points(np.stack((x,y),axis=0).transpose())
                elif (self.boundary.type == 'MultiPolygon'):
                    inds = np.empty(x.shape[0])
                    inds[:] = False
                    for polygon in self.boundary:
                        boundaryPath = path.Path(np.stack(polygon.boundary.exterior.xy).transpose())
                        inds = inds | boundaryPath.contains_points(np.stack((x,y),axis=0).transpose())
                    #inds=np.where(inds)
                rasterSamplePointsX = x[np.where(inds)]
                rasterSamplePointsY = y[np.where(inds)]
                #myBoundary=getattr(self,'boundary',geometry.MultiPoint(zip(self.x,self.y)))
                #self.boundary=getattr(self,'boundary',geometry.MultiPoint(zip(self.x,self.y)))
            except AttributeError:
                raise AttributeError("Boundary not identified, run getBoundary first and specify a type.")
            
            #Finally do the resampling with the specified method
            if (method == ResampleMethods.Linear):
                Z = griddata(list(zip(combinedSampleSet.x, combinedSampleSet.y)), combinedSampleSet.z,
                             list(zip(rasterSamplePointsX, rasterSamplePointsY)), method='linear')
            elif (method == ResampleMethods.Cubic):
                Z = griddata(list(zip(combinedSampleSet.x, combinedSampleSet.y)), combinedSampleSet.z,
                             list(zip(rasterSamplePointsX, rasterSamplePointsY)), method='cubic')
            elif (method == ResampleMethods.SmoothCubic):
                F = CloughTocher2DInterpolator(list(zip(combinedSampleSet.x, combinedSampleSet.y)), combinedSampleSet.z,
                             rescale=True)
                Z = F(rasterSamplePointsX, rasterSamplePointsY)
            elif (method == ResampleMethods.Kriging):
                points = np.stack((combinedSampleSet.x,combinedSampleSet.y),axis=1)
                grd = np.stack((rasterSamplePointsX,rasterSamplePointsY)).transpose()
                self.Finterp = kriging(points,grd,rescale=True,maxdist=maxDist,NNear=7)
                Z = self.Finterp(combinedSampleSet.z)
            elif (method == ResampleMethods.BsplineLSQ):
                F = LSQBivariateSpline(combinedSampleSet.x,combinedSampleSet.y,combinedSampleSet.z,raster.xBinCentres,raster.yBinCentres)
                Z = F(rasterSamplePointsX,rasterSamplePointsY,grid=False)
            elif (method == ResampleMethods.BsplineSmooth):
                F = SmoothBivariateSpline(combinedSampleSet.x, combinedSampleSet.y, combinedSampleSet.z)
                Z = F(rasterSamplePointsX, rasterSamplePointsY,grid=False)
            elif (method == ResampleMethods.NaturalNeighbour):
                print("** This doesn't work for me (RON) at the moment - "
                      'error "undefined symbol: _intel_fast_memcpy"')
                #_natgrid.seti(b'ext', 0)
                #_natgrid.setr(b'nul', np.nan)
                #zi = np.empty((raster.xBinCentres.shape[0],raster.yBinCentres.shape[0]), np.float64)
                #xp = np.require(combinedSampleSet.x, requirements=['C'])
                #yp = np.require(combinedSampleSet.y, requirements=['C'])
                #zp = np.require(combinedSampleSet.z, requirements=['C'])
                #xi = np.require(raster.xBinCentres, requirements=['C'])
                #yi = np.require(raster.yBinCentres, requirements=['C'])
                #_natgrid.natgridd(xp, yp, zp, xi, yi, zi)

                #Z = zi[inds.reshape(zi.shape)]
            elif (method == ResampleMethods.Rbf):
                rbfi = Rbf(self.x, self.y, self.z)
                # evaluate at the raster points being filled so Z lines up with inds below
                Z = rbfi(rasterSamplePointsX, rasterSamplePointsY)
        
            z[np.where(inds)]=Z
            raster.z = np.where(np.isnan(raster.z), np.reshape(z,raster.z.shape), raster.z)
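A standalone sketch of the two B-spline branches above, with hypothetical scattered samples: LSQBivariateSpline fixes the knots up front, while SmoothBivariateSpline places them automatically to satisfy a smoothness target.

import numpy as np
from scipy.interpolate import LSQBivariateSpline, SmoothBivariateSpline

rng = np.random.default_rng(3)
x, y = rng.uniform(0, 100, 500), rng.uniform(0, 100, 500)
z = np.sin(x / 20.) + np.cos(y / 20.)
xq, yq = rng.uniform(10, 90, 50), rng.uniform(10, 90, 50)

knots = np.linspace(10, 90, 8)  # interior knots for the LSQ fit
F_lsq = LSQBivariateSpline(x, y, z, knots, knots)
F_smooth = SmoothBivariateSpline(x, y, z)

Z_lsq = F_lsq(xq, yq, grid=False)
Z_smooth = F_smooth(xq, yq, grid=False)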
Example n. 13
        vthre[m].append(vth[r][p][m][ti])
        vphre[m].append(vph[r][p][m][ti])

    vrre[m] = np.array(vrre[m])  #Turn lists into numpy arrays
    vthre[m] = np.array(vthre[m])
    vphre[m] = np.array(vphre[m])

### Define 2d splines of velocity field

vrsp = {}  #Spline dictionaries, keys will be mode frequency names
vthsp = {}
vphsp = {}

for m in modes:  #Loop over modes for this particular rotation case

    vrsp[m] = SmoothBivariateSpline(rr, theta, vrre[m])
    vthsp[m] = SmoothBivariateSpline(rr, theta, vthre[m])
    vphsp[m] = SmoothBivariateSpline(rr, theta, vphre[m])

### Perform decomposition

Er = {}  #Dictionaries to hold the radial coefficients
Eth = {}
Eph = {}

for m in modes:

    Er[m] = {}
    Eth[m] = {}
    Eph[m] = {}
Example n. 14
    print('Splines (with linear extrapolation) : {} s per call'.format((s-t)/10/n_v))
    print( 'Max error : {}'.format(abs(out-true_vals).max()) )
    print( 'Mean error : {}'.format(abs(out-true_vals).mean()) )




    if d == 2:

        print('')

        from scipy.interpolate import SmoothBivariateSpline
        grid_x = grid[0,:]
        grid_y = grid[1,:]

        values = mvs.values[0,:]

        bs = SmoothBivariateSpline(grid_x, grid_y, values)

        t = time.time()
        for i in range(10):
            out = bs.ev(points[0,:], points[1,:])
        s = time.time()
        print('Splines (smooth splines from scipy) : {} s per call'.format((s-t)/10))
        print( 'Max error : {}'.format(abs(out-true_vals).max()) )
        print( 'Mean error : {}'.format(abs(out-true_vals).mean()) )




Example n. 15
    def makeInterpolation(self, myMethod="sbs", methodVal=None):
        # construct the interpolation grids for all coordinate systems

        # dictionaries with interpolation grids and coordinate systems
        self.interpGrids = {}
        self.interpEdges = {}
        self.interpValues = {}
        self.interpSpline = {}
        self.interpRbf = {}
        self.interpIDW = {}
        self.interpBMedian = {}
        self.interpBNentry = {}
        self.interpBMAD = {}
        self.interpTMean = {}
        self.interpTStd = {}

        # loop over Coordinate systems
        for iCoord in self.coordList:

            # build cell-centers for the interpolation grid
            ny, ylo, yhi, nx, xlo, xhi = self.gridArray[iCoord]
            yGrid, xGrid, yEdge, xEdge = self.makeGrid(ny, ylo, yhi, nx, xlo,
                                                       xhi)
            self.interpGrids[iCoord] = [xGrid, yGrid]
            self.interpEdges[iCoord] = [xEdge, yEdge]

            data = self.pointsArray[iCoord]
            if self.debugFlag:
                print("PointMesh: At ", iCoord, "we have ", data.shape[0],
                      " points")
            npts = data.shape[0]

            # check number of points
            if npts >= 5:
                xData = data[:, 0]
                yData = data[:, 1]
                zData = data[:, 2]

                if myMethod == "sbs":

                    # SmoothBivariateSpline
                    if npts > 600:
                        self.interpSpline[iCoord] = SmoothBivariateSpline(
                            xData,
                            yData,
                            zData,
                            bbox=[xlo, xhi, ylo, yhi],
                            kx=4,
                            ky=4,
                            s=1.e6)
                    elif npts >= 100:
                        self.interpSpline[iCoord] = SmoothBivariateSpline(
                            xData,
                            yData,
                            zData,
                            bbox=[xlo, xhi, ylo, yhi],
                            kx=3,
                            ky=3,
                            s=1.e6)
                    elif npts > 9:
                        self.interpSpline[iCoord] = SmoothBivariateSpline(
                            xData,
                            yData,
                            zData,
                            bbox=[xlo, xhi, ylo, yhi],
                            kx=2,
                            ky=2,
                            s=1.e6)
                    else:
                        self.interpSpline[iCoord] = SmoothBivariateSpline(
                            xData,
                            yData,
                            zData,
                            bbox=[xlo, xhi, ylo, yhi],
                            kx=1,
                            ky=1,
                            s=1.e7)
                    self.interpValues[iCoord] = self.interpSpline[iCoord].ev(
                        xGrid.reshape((ny * nx)), yGrid.reshape(
                            (ny * nx))).reshape((ny, nx))

                elif myMethod == "rbf":

                    self.interpRbf[iCoord] = Rbf(xData, yData, zData)
                    self.interpValues[iCoord] = self.interpRbf[iCoord](
                        xGrid.reshape((ny * nx)), yGrid.reshape(
                            (ny * nx))).reshape((ny, nx))

                elif myMethod == "tmean":

                    # use the truncated mean for each Coord -- very very simple!!
                    zstd = stats.tstd(zData)
                    zmean = stats.tmean(zData)
                    ztmean = stats.tmean(
                        zData, (zmean - 3. * zstd, zmean + 3. * zstd))
                    ztstd = stats.tstd(zData,
                                       (zmean - 3. * zstd, zmean + 3. * zstd))
                    self.interpTMean[iCoord] = ztmean
                    self.interpTStd[iCoord] = ztstd
                    self.interpValues[iCoord] = ztmean * numpy.ones((ny, nx))

                elif myMethod == "bmedian":

                    # use the median for each bin in each Coord
                    self.interpBMedian[iCoord] = numpy.zeros((ny, nx))
                    self.interpBNentry[iCoord] = numpy.zeros((ny, nx))
                    self.interpBMAD[iCoord] = numpy.zeros((ny, nx))
                    zvalL = []
                    xbin = numpy.digitize(xData, xEdge[0, :]) - 1
                    ybin = numpy.digitize(yData, yEdge[:, 0]) - 1
                    for i in range(nx):
                        for j in range(ny):
                            ok = numpy.logical_and.reduce(
                                (xbin == i, ybin == j))
                            zHere = zData[ok]
                            # add nEntry, MAD to the saved variables for the Mesh
                            nEntry = zHere.shape[0]
                            if nEntry >= 1:
                                median_value = numpy.median(zHere)
                                self.interpBMedian[iCoord][j, i] = median_value
                                self.interpBNentry[iCoord][j, i] = nEntry
                                self.interpBMAD[iCoord][j, i] = numpy.median(
                                    numpy.abs(zHere - median_value))
                            else:
                                # need to do something better!
                                self.interpBMedian[iCoord][j, i] = 0.
                    # fill interpValues, need to match order of locations in xGrid and yGrid
                    self.interpValues[iCoord] = self.interpBMedian[
                        iCoord].copy()

                elif myMethod == "grid":
                    Z = griddata((xData, yData), zData, (xGrid.reshape(
                        (ny * nx)), yGrid.reshape((ny * nx))), "linear")
                    self.interpValues[iCoord] = Z.reshape(xGrid.shape)

                elif myMethod == "idw":
                    # July 15, 2013 - change to use epsilon=1.0 (mm) to set a cutoff in the distance
                    # this will make small changes in the results for all Donuts
                    if methodVal is not None:
                        usekNN = methodVal[0]
                        useEpsilon = methodVal[1]
                    else:
                        usekNN = 4
                        useEpsilon = 1.0
                    self.interpIDW[iCoord] = IDWInterp(xData,
                                                       yData,
                                                       zData,
                                                       kNN=usekNN,
                                                       epsilon=useEpsilon)
                    self.interpValues[iCoord] = self.interpIDW[iCoord].ev(
                        xGrid.reshape((ny * nx)), yGrid.reshape(
                            (ny * nx))).reshape((ny, nx))

                else:
                    self.interpSpline[iCoord] = None
                    self.interpValues[iCoord] = numpy.zeros(xGrid.shape)

            else:
                self.interpTMean[iCoord] = 0.0
                self.interpBMedian[iCoord] = None
                self.interpBNentry[iCoord] = None
                self.interpBMAD[iCoord] = None
                self.interpTStd[iCoord] = 0.0
                self.interpSpline[iCoord] = None
                self.interpRbf[iCoord] = None
                self.interpValues[iCoord] = numpy.zeros(xGrid.shape)
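The degree laddering above tracks a fitpack constraint: SmoothBivariateSpline needs at least (kx+1)*(ky+1) points. A simplified sketch of the same rule keyed to that bare minimum (not the exact thresholds used above):

from scipy.interpolate import SmoothBivariateSpline

def fit_adaptive_spline(xData, yData, zData, bbox, s=1.e6):
    """Pick the highest spline degree the point count can support (sketch)."""
    for k in (4, 3, 2, 1):
        if len(zData) >= (k + 1) ** 2:  # fitpack: m >= (kx+1)*(ky+1)
            return SmoothBivariateSpline(xData, yData, zData,
                                         bbox=bbox, kx=k, ky=k, s=s)
    return None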
Example n. 16
    def calibrate_photometry_gaia(self, solution_num=None, iteration=1):
        """
        Calibrate extracted magnitudes with Gaia data.

        """

        num_solutions = self.plate_solution.num_solutions

        assert (solution_num is None
                or (solution_num > 0 and solution_num <= num_solutions))

        self.log.write(
            'Photometric calibration: solution {}, iteration {:d}'.format(
                solution_num, iteration),
            level=3,
            event=70,
            solution_num=solution_num)

        # Initialise the flag value
        self.phot_calibrated = False

        if 'METHOD' in self.plate_header:
            pmethod = self.plate_header['METHOD']

            if (pmethod is not None and pmethod != ''
                    and 'direct photograph' not in pmethod
                    and 'focusing' not in pmethod
                    and 'test plate' not in pmethod):
                self.log.write('Cannot calibrate photometry due to unsupported '
                               'observation method ({:s})'.format(pmethod),
                               level=2,
                               event=70,
                               solution_num=solution_num)
                return

        # Create dictionary for calibration results
        self.phot_calib = OrderedDict()

        # Create output directory, if missing
        if self.write_phot_dir and not os.path.isdir(self.write_phot_dir):
            self.log.write('Creating output directory {}'.format(
                self.write_phot_dir),
                           level=4,
                           event=70,
                           solution_num=solution_num)
            os.makedirs(self.write_phot_dir)

        if self.write_phot_dir:
            fn_cterm = os.path.join(self.write_phot_dir,
                                    '{}_cterm.txt'.format(self.basefn))
            fcterm = open(fn_cterm, 'wb')
            fn_caldata = os.path.join(self.write_phot_dir,
                                      '{}_caldata.txt'.format(self.basefn))
            fcaldata = open(fn_caldata, 'wb')

        # Select sources for photometric calibration
        self.log.write('Selecting sources for photometric calibration',
                       level=3,
                       event=71,
                       solution_num=solution_num,
                       double_newline=False)

        if solution_num is None:
            solution_num = 1

        self.phot_calib['solution_num'] = solution_num
        self.phot_calib['iteration'] = iteration

        # Store number of Gaia EDR3 objects matched with the current solution
        bgaia = (self.sources['solution_num'] == solution_num)
        self.phot_calib['num_gaia_edr3'] = bgaia.sum()

        # For single exposures, exclude blended sources.
        # For multiple exposures, include them, because otherwise the bright
        # end will lack calibration stars.
        if num_solutions == 1:
            bflags = ((self.sources['sextractor_flags'] == 0) |
                      (self.sources['sextractor_flags'] == 2))
        else:
            bflags = self.sources['sextractor_flags'] <= 3

        # Create calibration-star mask
        # Discard very red stars (BP-RP > 2)
        cal_mask = ((self.sources['solution_num'] == solution_num) &
                    (self.sources['mag_auto'] > 0) &
                    (self.sources['mag_auto'] < 90) & bflags &
                    (self.sources['flag_clean'] == 1)
                    & ~self.sources['gaiaedr3_bpmag'].mask
                    & ~self.sources['gaiaedr3_rpmag'].mask &
                    (self.sources['gaiaedr3_bp_rp'].filled(99.) <= 2) &
                    (self.sources['gaiaedr3_neighbors'] == 1))

        num_calstars = cal_mask.sum()
        self.phot_calib['num_candidate_stars'] = num_calstars

        if num_calstars == 0:
            self.log.write('No stars for photometric calibration',
                           level=2,
                           event=71,
                           solution_num=solution_num)
            return

        self.log.write('Found {:d} calibration-star candidates with '
                       'Gaia magnitudes on the plate'.format(num_calstars),
                       level=4,
                       event=71,
                       solution_num=solution_num)

        if num_calstars < 10:
            self.log.write('Too few calibration stars on the plate!',
                           level=2,
                           event=71,
                           solution_num=solution_num)
            return

        # Evaluate color term

        if iteration == 1:
            self.log.write('Determining color term using annular bins 1-3',
                           level=3,
                           event=72,
                           solution_num=solution_num)
            cterm_mask = cal_mask & (self.sources['annular_bin'] <= 3)

            if cterm_mask.sum() < 50:
                self.log.write('Found {:d} calibration stars in bins 1-3, '
                               'increasing area'.format(cterm_mask.sum()),
                               level=4,
                               event=72,
                               solution_num=solution_num)
                self.log.write('Determining color term using annular bins 1-6',
                               level=3,
                               event=72,
                               solution_num=solution_num)
                cterm_mask = cal_mask & (self.sources['annular_bin'] <= 6)
        else:
            self.log.write('Determining color term using annular bins 1-8',
                           level=3,
                           event=72,
                           solution_num=solution_num)
            cterm_mask = cal_mask & (self.sources['annular_bin'] <= 8)

        self.evaluate_color_term(self.sources[cterm_mask],
                                 solution_num=solution_num)

        # If color term was not determined, we need to terminate the
        # calibration
        if 'color_term' not in self.phot_calib:
            self.log.write(
                'Cannot continue photometric calibration without '
                'color term',
                level=2,
                event=72,
                solution_num=solution_num)
            return

        cterm = self.phot_calib['color_term']
        cterm_err = self.phot_calib['color_term_error']

        # Use stars in all annular bins
        self.log.write('Photometric calibration using annular bins 1-9',
                       level=3,
                       event=73,
                       solution_num=solution_num)

        # Select stars with unique plate mag values
        plate_mag = self.sources['mag_auto'][cal_mask].data
        plate_mag_u, uind = np.unique(plate_mag, return_index=True)
        ind_calibstar_u = np.where(cal_mask)[0][uind]
        #cal_u_mask = np.zeros_like(cal_mask)
        #cal_u_mask[np.where(cal_mask)[0][uind]] = True
        num_cal_u = len(plate_mag_u)

        self.log.write('{:d} stars with unique magnitude'.format(num_cal_u),
                       double_newline=False,
                       level=4,
                       event=73,
                       solution_num=solution_num)

        if num_cal_u < 10:
            self.log.write('Too few stars with unique magnitude!',
                           double_newline=False,
                           level=2,
                           event=73,
                           solution_num=solution_num)
            return

        plate_mag_u = self.sources['mag_auto'][ind_calibstar_u].data
        cat_bmag_u = self.sources['gaiaedr3_bpmag'][ind_calibstar_u].data
        cat_vmag_u = self.sources['gaiaedr3_rpmag'][ind_calibstar_u].data
        cat_natmag = cat_vmag_u + cterm * (cat_bmag_u - cat_vmag_u)
        self.sources['cat_natmag'][ind_calibstar_u] = cat_natmag

        # Eliminate outliers by constructing calibration curve from
        # the bright end and extrapolate towards faint stars

        # Find initial plate magnitude limit
        kde = sm.nonparametric.KDEUnivariate(plate_mag_u.astype(np.double))
        kde.fit()
        ind_maxden = np.argmax(kde.density)
        plate_mag_maxden = kde.support[ind_maxden]
        ind_dense = np.where(kde.density > 0.2 * kde.density.max())[0]
        brightmag = kde.support[ind_dense[0]]
        plate_mag_lim = kde.support[ind_dense[-1]]
        plate_mag_brt = plate_mag_u.min()
        plate_mag_mid = (plate_mag_brt + 0.5 * (plate_mag_lim - plate_mag_brt))

        if brightmag > plate_mag_mid:
            brightmag = plate_mag_mid

        # Check the number of stars in the bright end
        nb = (plate_mag_u <= plate_mag_mid).sum()

        if nb < 10:
            plate_mag_mid = plate_mag_u[9]

        # Construct magnitude cuts for outlier elimination
        ncuts = int((plate_mag_lim - plate_mag_mid) / 0.5) + 2
        mag_cuts = np.linspace(plate_mag_mid, plate_mag_lim, ncuts)
        ind_cut = np.where(plate_mag_u <= plate_mag_mid)[0]
        ind_good = np.arange(len(ind_cut))
        mag_cut_prev = mag_cuts[0]
        #mag_slope_prev = None

        # Loop over magnitude bins
        for mag_cut in mag_cuts[1:]:
            gpmag = plate_mag_u[ind_cut[ind_good]]
            gcmag = cat_natmag[ind_cut[ind_good]]

            nbright = (gpmag < brightmag).sum()

            if nbright < 20:
                alt_brightmag = (plate_mag_u.min() +
                                 (plate_mag_maxden - plate_mag_u.min()) * 0.5)
                nbright = (gpmag < alt_brightmag).sum()

            if nbright < 10:
                nbright = 10

            # Exclude bright outliers by fitting a line and checking
            # if residuals are larger than 2 mag
            ind_outliers = np.array([], dtype=int)
            xdata = gpmag[:nbright]
            ydata = gcmag[:nbright]
            p1 = np.poly1d(np.polyfit(xdata, ydata, 1))
            res = cat_natmag[ind_cut] - p1(plate_mag_u[ind_cut])
            ind_brightout = np.where((np.absolute(res) > 2.) &
                                     (plate_mag_u[ind_cut] <= xdata.max()))[0]

            if len(ind_brightout) > 0:
                ind_outliers = np.append(ind_outliers, ind_cut[ind_brightout])
                ind_good = np.setdiff1d(ind_good, ind_outliers)
                gpmag = plate_mag_u[ind_cut[ind_good]]
                gcmag = cat_natmag[ind_cut[ind_good]]
                nbright -= len(ind_brightout)

                if nbright < 10:
                    nbright = 10

            # Construct calibration curve
            # Set lowess fraction depending on the number of data points
            frac = 0.2

            if len(ind_good) < 500:
                frac = 0.2 + 0.3 * (500 - len(ind_good)) / 500.

            z = sm.nonparametric.lowess(gcmag,
                                        gpmag,
                                        frac=frac,
                                        it=3,
                                        delta=0.1,
                                        return_sorted=True)

            # In case there are less than 20 good stars, use only
            # polynomial
            if len(ind_good) < 20:
                weights = np.zeros(len(ind_good)) + 1.

                for i in np.arange(len(ind_good)):
                    indw = np.where(np.absolute(gpmag - gpmag[i]) < 1.0)[0]

                    if len(indw) > 2:
                        weights[i] = 1. / gcmag[indw].std()**2

                p2 = np.poly1d(np.polyfit(gpmag, gcmag, 2, w=weights))
                z[:, 1] = p2(z[:, 0])

            # Improve bright-star calibration
            if nbright > len(ind_good):
                nbright = len(ind_good)

            xbright = gpmag[:nbright]
            ybright = gcmag[:nbright]

            if nbright < 50:
                p2 = np.poly1d(np.polyfit(xbright, ybright, 2))
                vals = p2(xbright)
            else:
                z1 = sm.nonparametric.lowess(ybright,
                                             xbright,
                                             frac=0.4,
                                             it=3,
                                             delta=0.1,
                                             return_sorted=True)
                vals = z1[:, 1]

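            # Blend the bright-end fit into the lowess curve: the lowess
            # weight ramps linearly from 0 at the brightest star to 1 at
            # the nbright-th star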
            weight2 = np.arange(nbright, dtype=float) / nbright
            weight1 = 1. - weight2
            z[:nbright, 1] = weight1 * vals + weight2 * z[:nbright, 1]

            # Improve faint-star calibration by fitting a 2nd-order
            # polynomial (currently disabled)
            improve_faint = False
            if improve_faint:
                ind_faint = np.where(gpmag > mag_cut_prev - 6.)[0]
                nfaint = len(ind_faint)

                if nfaint > 5:
                    xfaint = gpmag[ind_faint]
                    yfaint = gcmag[ind_faint]
                    weights = np.zeros(nfaint) + 1.

                    for i in np.arange(nfaint):
                        indw = np.where(
                            np.absolute(xfaint - xfaint[i]) < 0.5)[0]

                        if len(indw) > 2:
                            weights[i] = 1. / yfaint[indw].std()**2

                    p2 = np.poly1d(np.polyfit(xfaint, yfaint, 2, w=weights))
                    vals = p2(xfaint)

                    weight2 = np.arange(nfaint, dtype=float) / nfaint
                    weight1 = 1. - weight2
                    z[ind_faint,
                      1] = weight2 * vals + weight1 * z[ind_faint, 1]

            # Interpolate smoothed calibration curve
            s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)

            ind_cut = np.where(plate_mag_u <= mag_cut)[0]
            fit_mag = s(plate_mag_u[ind_cut])

            residuals = cat_natmag[ind_cut] - fit_mag
            mag_cut_prev = mag_cut

            ind_outliers = np.array([], dtype=int)

            # Mark as outliers those stars that deviate more than 1 mag
            ind_out = np.where(np.absolute(residuals) > 1.0)[0]

            if len(ind_out) > 0:
                ind_outliers = np.append(ind_outliers, ind_cut[ind_out])
                ind_outliers = np.unique(ind_outliers)

            # Additionally clip outliers in small bins
            for mag_loc in np.linspace(plate_mag_brt, mag_cut, 100):
                mag_low = mag_loc - 0.5
                mag_high = mag_loc + 0.5
                ind_loc = np.where((plate_mag_u[ind_cut] > mag_low)
                                   & (plate_mag_u[ind_cut] < mag_high))[0]
                ind_loc = np.setdiff1d(ind_loc, ind_outliers)

                if len(ind_loc) >= 5:
                    rms_res = np.sqrt((residuals[ind_loc]**2).mean())
                    ind_locout = np.where(
                        np.absolute(residuals[ind_loc]) > 3. * rms_res)[0]

                    if len(ind_locout) > 0:
                        ind_outliers = np.append(ind_outliers,
                                                 ind_cut[ind_loc[ind_locout]])

                    ind_outliers = np.unique(ind_outliers)

            ind_good = np.setdiff1d(np.arange(len(ind_cut)), ind_outliers)

            #flt = sigma_clip(residuals, maxiters=None)
            #ind_good = ~flt.mask
            #ind_good = np.where(np.absolute(residuals) < 3*residuals.std())[0]

            # Stop outlier elimination if there is a gap in magnitudes
            if mag_cut - plate_mag_u[ind_cut[ind_good]].max() > 1.5:
                ind_faintout = np.where(plate_mag_u > mag_cut)[0]

                if len(ind_faintout) > 0:
                    ind_outliers = np.append(ind_outliers, ind_faintout)
                    ind_outliers = np.unique(ind_outliers)
                    ind_good = np.setdiff1d(np.arange(len(plate_mag_u)),
                                            ind_outliers)
                    self.log.write(
                        '{:d} faint stars eliminated as outliers'.format(
                            len(ind_faintout)),
                        double_newline=False,
                        level=4,
                        event=73,
                        solution_num=solution_num)

                self.log.write(
                    'Outlier elimination stopped due to a long gap '
                    'in magnitudes!',
                    double_newline=False,
                    level=2,
                    event=73,
                    solution_num=solution_num)
                break

            if len(ind_good) < 10:
                self.log.write(
                    'Outlier elimination stopped '
                    'due to insufficient number of stars left!',
                    double_newline=False,
                    level=2,
                    event=73,
                    solution_num=solution_num)
                break

        num_outliers = len(ind_outliers)
        self.log.write('{:d} outliers eliminated'.format(num_outliers),
                       double_newline=False,
                       level=4,
                       event=73,
                       solution_num=solution_num)
        ind_good = np.setdiff1d(np.arange(len(plate_mag_u)), ind_outliers)
        self.log.write('{:d} stars after outlier elimination'.format(
            len(ind_good)),
                       double_newline=False,
                       level=4,
                       event=73,
                       solution_num=solution_num)

        if len(ind_good) < 10:
            self.log.write('Too few calibration stars ({:d}) after outlier '
                           'elimination!'.format(len(ind_good)),
                           double_newline=False,
                           level=2,
                           event=73,
                           solution_num=solution_num)
            return

        # Continue with photometric calibration without outliers

        # Study the distribution of magnitudes
        kde = sm.nonparametric.KDEUnivariate(plate_mag_u[ind_good].astype(
            np.double))
        kde.fit()
        ind_maxden = np.argmax(kde.density)
        plate_mag_maxden = kde.support[ind_maxden]
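        # Take the limiting magnitude to be the faint edge of the range
        # where the KDE density exceeds 20% of its maximum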
        ind_dense = np.where(kde.density > 0.2 * kde.density.max())[0]
        plate_mag_lim = kde.support[ind_dense[-1]]
        ind_valid = np.where(plate_mag_u[ind_good] <= plate_mag_lim)[0]
        num_valid = len(ind_valid)

        self.log.write(
            '{:d} calibration stars brighter than limiting magnitude'.format(
                num_valid),
            double_newline=False,
            level=4,
            event=73,
            solution_num=solution_num)

        #valid_cal_mask = np.zeros_like(cal_u_mask)
        #valid_cal_mask[np.where(cal_u_mask)[0][ind_good[ind_valid]]] = True
        ind_calibstar_valid = ind_calibstar_u[ind_good[ind_valid]]
        self.sources['phot_calib_flags'][ind_calibstar_valid] = 1

        if num_outliers > 0:
            #outlier_mask = np.zeros_like(cal_u_mask)
            #outlier_mask[np.where(cal_u_mask)[0][ind_outliers]]
            ind_calibstar_outlier = ind_calibstar_u[ind_outliers]
            self.sources['phot_calib_flags'][ind_calibstar_outlier] = 2

        cat_natmag = cat_natmag[ind_good[ind_valid]]
        plate_mag_u = plate_mag_u[ind_good[ind_valid]]
        plate_mag_brightest = plate_mag_u.min()
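        # Set the lowess fraction depending on the number of data points
        # (use a larger fraction for sparser data)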
        frac = 0.2

        if num_valid < 500:
            frac = 0.2 + 0.3 * (500 - num_valid) / 500.

        z = sm.nonparametric.lowess(cat_natmag,
                                    plate_mag_u,
                                    frac=frac,
                                    it=3,
                                    delta=0.1,
                                    return_sorted=True)

        # Improve bright-star calibration

        # Find magnitude at which the frequency of stars becomes
        # larger than 500 mag^(-1)
        #ind_500 = np.where((kde.density*len(ind_good) > 500))[0][0]
        #brightmag = kde.support[ind_500]

        # Find magnitude at which density becomes larger than 0.05 of
        # the max density
        #ind_dense_005 = np.where(kde.density > 0.05*kde.density.max())[0]
        # Index of kde.support at which density becomes 0.05 of max
        #ind0 = ind_dense_005[0]
        #brightmag = kde.support[ind0]
        #nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])

        # Find magnitude at which density becomes larger than 0.2 of
        # the max density
        #brightmag = kde.support[ind_dense[0]]
        #nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])

        # Take the brightest 2 per cent of stars as the bright end
        nbright = round(num_valid * 0.02)

        # Limit the number of bright stars to 2000
        nbright = min([nbright, 2000])

        if nbright < 20:
            brightmag = (plate_mag_brightest +
                         (plate_mag_maxden - plate_mag_brightest) * 0.5)
            nbright = len(plate_mag_u[np.where(plate_mag_u < brightmag)])

        if nbright < 5:
            nbright = 5

        if nbright < 50:
            p2 = np.poly1d(
                np.polyfit(plate_mag_u[:nbright], cat_natmag[:nbright], 2))
            vals = p2(plate_mag_u[:nbright])
        else:
            z1 = sm.nonparametric.lowess(cat_natmag[:nbright],
                                         plate_mag_u[:nbright],
                                         frac=0.4,
                                         it=3,
                                         delta=0.1,
                                         return_sorted=True)
            vals = z1[:, 1]

        t = Table()
        t['plate_mag'] = plate_mag_u[:nbright]
        t['cat_natmag'] = cat_natmag[:nbright]
        t['fit_mag'] = vals
        basefn_solution = '{}-{:02d}'.format(self.basefn, solution_num)
        fn_tab = os.path.join(self.scratch_dir,
                              '{}_bright.fits'.format(basefn_solution))
        t.write(fn_tab, format='fits', overwrite=True)

        # Normalise density to max density of the bright range
        #d_bright = kde.density[:ind0] / kde.density[:ind0].max()
        # Find a smooth density curve and use values as weights
        #s_bright = InterpolatedUnivariateSpline(kde.support[:ind0],
        #                                        d_bright, k=1)
        #weight2 = s_bright(plate_mag_u[:nbright])

        # Linearly increasing weight
        weight2 = np.arange(nbright, dtype=float) / nbright

        weight1 = 1. - weight2

        # Merge two calibration curves with different weights
        z[:nbright, 1] = weight1 * vals + weight2 * z[:nbright, 1]

        # Interpolate the whole calibration curve
        s = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)

        # Store the calibration curve
        self.calib_curve = s

        # Calculate residuals
        residuals = cat_natmag - s(plate_mag_u)

        # Smooth residuals with spline
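        # (model the spatial dependence of the residuals with a bivariate
        # spline in plate coordinates; use a lower spline degree when fewer
        # stars are available)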
        X = self.sources['x_source'][ind_calibstar_valid].data
        Y = self.sources['y_source'][ind_calibstar_valid].data

        if num_valid > 100:
            s_corr = SmoothBivariateSpline(X, Y, residuals, kx=5, ky=5)
        elif num_valid > 50:
            s_corr = SmoothBivariateSpline(X, Y, residuals, kx=3, ky=3)
        else:
            s_corr = None

        # Calculate new residuals and correct for dependence on
        # x, y, mag_auto. Do it only if the number of valid
        # calibration stars is larger than 500.
        s_magcorr = None

        if num_valid > 500:
            residuals2 = np.zeros(num_valid)

            for i in np.arange(num_valid):
                residuals2[i] = residuals[i] - s_corr(X[i], Y[i])

            # Create magnitude bins
            plate_mag_srt = np.sort(plate_mag_u)
            bin_mag = [(plate_mag_srt[99] + plate_mag_srt[0]) / 2.]
            bin_hw = [(plate_mag_srt[99] - plate_mag_srt[0]) / 2.]
            ind_lastmag = 99

            while True:
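                # Extend the current bin out to the next 100th star where
                # stars are sparse (gap > 0.5 mag); otherwise add a fixed
                # 0.5-mag-wide bin, which in that case holds at least
                # 100 stars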
                if plate_mag_srt[ind_lastmag +
                                 100] - bin_mag[-1] - bin_hw[-1] > 0.5:
                    bin_edge = bin_mag[-1] + bin_hw[-1]
                    bin_mag.append(
                        (plate_mag_srt[ind_lastmag + 100] + bin_edge) / 2.)
                    bin_hw.append(
                        (plate_mag_srt[ind_lastmag + 100] - bin_edge) / 2.)
                    ind_lastmag += 100
                else:
                    bin_mag.append(bin_mag[-1] + bin_hw[-1] + 0.25)
                    bin_hw.append(0.25)
                    ind_lastmag = (plate_mag_srt <
                                   bin_mag[-1] + 0.25).sum() - 1

                # If less than 100 sources remain
                if ind_lastmag > num_valid - 101:
                    add_width = plate_mag_srt[-1] - bin_mag[-1] - bin_hw[-1]
                    bin_mag[-1] += add_width / 2.
                    bin_hw[-1] += add_width / 2.
                    break

            # Evaluate natmag correction in magnitude bins
            s_magcorr = []

            for i, (m, hw) in enumerate(zip(bin_mag, bin_hw)):
                binmask = (plate_mag_u > m - hw) & (plate_mag_u <= m + hw)
                #print(m, m-hw, m+hw, binmask.sum())
                smag = SmoothBivariateSpline(X[binmask],
                                             Y[binmask],
                                             residuals2[binmask],
                                             kx=3,
                                             ky=3)
                s_magcorr.append(smag)

        # Evaluate RMS errors from the calibration residuals
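        # (running scatter over a sliding window of 10 neighbouring stars,
        # then lowess-smoothed and interpolated as a function of magnitude)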
        rmse_list = generic_filter(residuals, _rmse, size=10)
        rmse_lowess = sm.nonparametric.lowess(rmse_list,
                                              plate_mag_u,
                                              frac=0.5,
                                              it=3,
                                              delta=0.1)
        s_rmse = InterpolatedUnivariateSpline(rmse_lowess[:, 0],
                                              rmse_lowess[:, 1],
                                              k=1)
        rmse = s_rmse(plate_mag_u)

        if self.write_phot_dir:
            np.savetxt(
                fcaldata,
                np.column_stack((plate_mag_u, cat_natmag, s(plate_mag_u),
                                 cat_natmag - s(plate_mag_u))))
            fcaldata.write('\n\n')

        # Store calibration statistics
        bright_limit = s(plate_mag_brightest).item()
        faint_limit = s(plate_mag_lim).item()

        self.phot_calib['num_calib_stars'] = num_valid
        self.phot_calib['num_bright_stars'] = nbright
        self.phot_calib['num_outliers'] = num_outliers
        self.phot_calib['bright_limit'] = bright_limit
        self.phot_calib['faint_limit'] = faint_limit
        self.phot_calib['mag_range'] = faint_limit - bright_limit
        self.phot_calib['rmse_min'] = rmse.min()
        self.phot_calib['rmse_median'] = np.median(rmse)
        self.phot_calib['rmse_max'] = rmse.max()
        self.phot_calib['plate_mag_brightest'] = plate_mag_brightest
        self.phot_calib['plate_mag_density02'] = kde.support[ind_dense[0]]
        self.phot_calib['plate_mag_brightcut'] = brightmag
        self.phot_calib['plate_mag_maxden'] = plate_mag_maxden
        self.phot_calib['plate_mag_lim'] = plate_mag_lim

        # Append calibration results to the list
        self.phot_calib_list.append(self.phot_calib)

        # Apply photometric calibration to sources
        sol_mask = ((self.sources['solution_num'] == solution_num) &
                    (self.sources['mag_auto'] < 90.))
        num_solstars = sol_mask.sum()
        mag_auto_sol = self.sources['mag_auto'][sol_mask]

        self.log.write(
            'Applying photometric calibration to sources '
            'in annular bins 1-9',
            level=3,
            event=74,
            solution_num=solution_num)

        # Correct magnitudes for positional effects
        if s_corr is not None:
            natmag_corr = self.sources['natmag_correction'][sol_mask]
            xsrc = self.sources['x_source'][sol_mask]
            ysrc = self.sources['y_source'][sol_mask]

            # Use a for loop, because SmoothBivariateSpline may crash with
            # large input arrays
            for i in np.arange(num_solstars):
                # Apply first correction (dependent only on coordinates)
                natmag_corr[i] = s_corr(xsrc[i], ysrc[i])

                # Apply second correction (dependent on mag_auto)
                if s_magcorr is not None:
                    corr_list = []

                    for smag in s_magcorr:
                        corr_list.append(smag(xsrc[i], ysrc[i])[0, 0])

                    smc = InterpolatedUnivariateSpline(bin_mag, corr_list, k=1)
                    natmag_corr[i] += smc(mag_auto_sol[i])

        # Assign magnitudes and errors
        self.sources['natmag'][sol_mask] = s(mag_auto_sol)
        self.sources['natmag_plate'][sol_mask] = s(mag_auto_sol)
        self.sources['natmag_error'][sol_mask] = s_rmse(mag_auto_sol)

        if s_corr is not None:
            self.sources['natmag_correction'][sol_mask] = natmag_corr
            self.sources['natmag'][sol_mask] += natmag_corr

        self.sources['color_term'][sol_mask] = cterm
        self.sources['natmag_residual'][ind_calibstar_u] = \
                (self.sources['cat_natmag'][ind_calibstar_u] -
                 self.sources['natmag'][ind_calibstar_u])

        # Apply flags and errors to sources outside the magnitude range
        # of calibration stars
        brange = (mag_auto_sol < plate_mag_brightest)
        ind = np.where(sol_mask)[0][brange]

        if brange.sum() > 0:
            self.sources['phot_range_flags'][ind] = 1
            self.sources['natmag_error'][ind] = s_rmse(plate_mag_brightest)

        brange = (mag_auto_sol > plate_mag_lim)
        ind = np.where(sol_mask)[0][brange]

        if brange.sum() > 0:
            self.sources['phot_range_flags'][ind] = 2
            self.sources['natmag_error'][ind] = s_rmse(plate_mag_lim)

        # Select stars with known external photometry
        bgaia = (sol_mask & ~self.sources['gaiaedr3_bpmag'].mask
                 & ~self.sources['gaiaedr3_rpmag'].mask)

        if bgaia.sum() > 0:
            bp_rp = self.sources['gaiaedr3_bp_rp'][bgaia]
            bp_rp_err = 0.

            self.sources['rpmag'][bgaia] = (self.sources['natmag'][bgaia] -
                                            cterm * bp_rp)
            self.sources['bpmag'][bgaia] = (self.sources['natmag'][bgaia] -
                                            (cterm - 1.) * bp_rp)
            rpmagerr = np.sqrt(self.sources['natmag_error'][bgaia]**2 +
                               (cterm_err * bp_rp)**2 + (cterm * bp_rp_err)**2)
            bpmagerr = np.sqrt(self.sources['natmag_error'][bgaia]**2 +
                               (cterm_err * bp_rp)**2 +
                               ((cterm - 1.) * bp_rp_err)**2)
            self.sources['rpmag_error'][bgaia] = rpmagerr
            self.sources['bpmag_error'][bgaia] = bpmagerr

        try:
            brightlim = min([
                cal['bright_limit'] for cal in self.phot_calib_list
                if cal['solution_num'] == solution_num
                and cal['iteration'] == iteration
            ])
            faintlim = max([
                cal['faint_limit'] for cal in self.phot_calib_list
                if cal['solution_num'] == solution_num
                and cal['iteration'] == iteration
            ])
            mag_range = faintlim - brightlim
        except Exception:
            brightlim = None
            faintlim = None
            mag_range = None

        if num_valid > 0:
            self.phot_calibrated = True
            self.bright_limit = brightlim
            self.faint_limit = faintlim

            self.log.write('Photometric calibration results (solution {:d}, '
                           'iteration {:d}): '
                           'bright limit {:.3f}, faint limit {:.3f}'.format(
                               solution_num, iteration, brightlim, faintlim),
                           level=4,
                           event=73,
                           solution_num=solution_num)

        if self.write_phot_dir:
            fcaldata.close()
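
# --- Illustrative sketch (not part of the example above) ---
# The calibration-curve recipe above reduces to: lowess-smooth catalogue
# magnitude against plate magnitude, interpolate the smoothed curve with a
# k=1 spline, and map instrumental magnitudes through it. A minimal,
# self-contained sketch on synthetic data; all numbers here are
# illustrative assumptions, not values from the example above.
import numpy as np
import statsmodels.api as sm
from scipy.interpolate import InterpolatedUnivariateSpline

rng = np.random.default_rng(0)

# Synthetic plate magnitudes and a mildly nonlinear "catalogue" relation
plate_mag = np.sort(rng.uniform(8., 16., 400))
cat_mag = (0.9 * plate_mag + 0.02 * (plate_mag - 12.)**2
           + rng.normal(0., 0.1, plate_mag.size))

# Lowess-smooth catalogue vs. plate magnitude (sorted output)
z = sm.nonparametric.lowess(cat_mag, plate_mag, frac=0.2, it=3,
                            delta=0.1, return_sorted=True)

# Interpolate the smoothed calibration curve with a linear spline
calib_curve = InterpolatedUnivariateSpline(z[:, 0], z[:, 1], k=1)

# Apply the curve and inspect the residual scatter
residuals = cat_mag - calib_curve(plate_mag)
print('residual rms: {:.3f}'.format(np.sqrt((residuals**2).mean())))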
Example n. 17
    def pseudo_inverse(self,
                       ds_f=1,
                       interpolation_method="griddata_custom",
                       interpolator_kwargs=None):
        """Find the displacement field of the inverse mapping.

        Notes
        -----
        Dangerously approximate and imprecise. Uses irregular grid
        interpolation.

        Parameters
        ----------
        ds_f : int, optional
            Downsampling factor for all the interpolations. Note that
            ds_f = 1 means no downsampling. Applied to both the x and y
            coordinates.

        interpolation_method : {'griddata', 'griddata_custom', 'itk', 'noop', 'smooth_bspline', 'LSQ_bspline', 'rbf'}, optional
            Interpolation method to use.

        interpolator_kwargs : dict, optional
            Additional parameters passed to the interpolator.

        Returns
        -------
        DisplacementField
            An instance of the DisplacementField class representing the
            inverse mapping.

        """
        interpolator_kwargs = interpolator_kwargs or {}

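        # For all scattered-data methods, build the regular target grid and
        # the forward-mapped point cloud: each grid pixel (x, y) lands at
        # (x + delta_x, y + delta_y), and the inverse field is recovered by
        # interpolating the original grid coordinates at those scattered
        # points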
        if interpolation_method != "itk":
            x, y = np.meshgrid(list(range(self.shape[1])),
                               list(range(self.shape[0])))
            xi = (y, x)
            x_r, y_r = x.ravel(), y.ravel()

            points = np.hstack((
                (y_r + self.delta_y.ravel()).reshape(-1, 1),
                (x_r + self.delta_x.ravel()).reshape(-1, 1),
            ))

            # Downsampling
            points = points[::ds_f]
            x_r_ds = x_r[::ds_f]
            y_r_ds = y_r[::ds_f]

            x_, y_ = points[:, 1], points[:, 0]

        if interpolation_method == "griddata":
            values_grid_x = griddata(points=points, values=x_r_ds, xi=xi)
            values_grid_y = griddata(points=points, values=y_r_ds, xi=xi)

            delta_x = values_grid_x.reshape(self.shape) - x
            delta_y = values_grid_y.reshape(self.shape) - y

        elif interpolation_method == "griddata_custom":
            # triangulation performed only once
            values_grid_x, values_grid_y = griddata_custom(
                points, x_r_ds, y_r_ds, xi)

            delta_x = values_grid_x.reshape(self.shape) - x
            delta_y = values_grid_y.reshape(self.shape) - y

        elif interpolation_method == "itk":
            # ~ 30 ms per image
            df_sitk = sitk.GetImageFromArray(
                np.concatenate(
                    (self.delta_x[..., np.newaxis], self.delta_y[...,
                                                                 np.newaxis]),
                    axis=2,
                ),
                isVector=True,
            )

            invertor = sitk.InvertDisplacementFieldImageFilter()

            # Set behaviour
            user_spec = {
                "n_iter": interpolator_kwargs.get("n_iter", 20),
                "tol": interpolator_kwargs.get("tol", 1e-3),
            }

            # invertor.EnforceBoundaryConditionOn()
            invertor.SetMeanErrorToleranceThreshold(
                user_spec["tol"])  # big effect
            invertor.SetMaximumNumberOfIterations(
                user_spec["n_iter"])  # big effect

            # Run
            df_sitk_inv = invertor.Execute(df_sitk)

            delta_xy = sitk.GetArrayFromImage(df_sitk_inv)

            delta_x, delta_y = delta_xy[..., 0], delta_xy[..., 1]

        elif interpolation_method == "noop":
            # for benchmarking purposes
            delta_x, delta_y = np.zeros(self.shape), np.zeros(self.shape)

        elif interpolation_method == "smooth_bspline":
            ip_delta_x = SmoothBivariateSpline(x_, y_, x_r_ds,
                                               **interpolator_kwargs)
            ip_delta_y = SmoothBivariateSpline(x_, y_, y_r_ds,
                                               **interpolator_kwargs)

            delta_x = ip_delta_x(x_r, y_r, grid=False).reshape(self.shape) - x
            delta_y = ip_delta_y(x_r, y_r, grid=False).reshape(self.shape) - y

        elif interpolation_method == "LSQ_bspline":
            # Downsampling factors on the x and y knots; these are not scipy
            # kwargs, so pop them before passing the rest to the interpolator
            tx_ds_f = interpolator_kwargs.pop("tx_ds_f", 1)
            ty_ds_f = interpolator_kwargs.pop("ty_ds_f", 1)
            auto = interpolator_kwargs.pop("auto", False)

            if auto:
                # WARNING: this automatic knot selection has been observed
                # to crash the kernel
                # Create a knot grid only where the deformation is nonzero
                eps = 0.1
                range_x = np.unique(self.transformation[0][self.delta_x > eps])
                range_y = np.unique(self.transformation[1][self.delta_y > eps])

                tx_start, tx_end = max(0, np.floor(range_x.min())), min(
                    self.shape[1] - 1, np.ceil(range_x.max()))
                ty_start, ty_end = max(0, np.floor(range_y.min())), min(
                    self.shape[0] - 1, np.ceil(range_y.max()))

                tx = list(np.arange(tx_start, tx_end, dtype=int))[::tx_ds_f]
                ty = list(np.arange(ty_start, ty_end, dtype=int))[::ty_ds_f]

            else:
                tx, ty = (
                    list(range(self.shape[1]))[::tx_ds_f],
                    list(range(self.shape[0]))[::ty_ds_f],
                )

            ip_delta_x = LSQBivariateSpline(x_, y_, x_r_ds, tx, ty,
                                            **interpolator_kwargs)
            ip_delta_y = LSQBivariateSpline(x_, y_, y_r_ds, tx, ty,
                                            **interpolator_kwargs)

            delta_x = ip_delta_x(x_r, y_r, grid=False).reshape(self.shape) - x
            delta_y = ip_delta_y(x_r, y_r, grid=False).reshape(self.shape) - y

        elif interpolation_method == "rbf":
            ip_delta_x = Rbf(x_, y_, x_r_ds, **interpolator_kwargs)
            ip_delta_y = Rbf(x_, y_, y_r_ds, **interpolator_kwargs)

            delta_x = ip_delta_x(x_r, y_r).reshape(self.shape) - x
            delta_y = ip_delta_y(x_r, y_r).reshape(self.shape) - y

        else:
            raise ValueError("Unrecognized interpolation_method: {}".format(
                interpolation_method))

        return DisplacementField(delta_x, delta_y)
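
# --- Illustrative sketch (not part of the example above) ---
# The "smooth_bspline" branch inverts a displacement field by treating the
# forward-mapped pixel positions as scattered sample points whose values are
# the original grid coordinates, fitting a smoothing spline, and evaluating
# it back on the regular grid. A minimal sketch on a toy constant-shift
# field; the shape and shift values are illustrative assumptions.
import numpy as np
from scipy.interpolate import SmoothBivariateSpline

shape = (40, 40)
y, x = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')

# Forward field: a constant shift, whose exact inverse is the negated shift
delta_x = np.full(shape, 1.5)
delta_y = np.full(shape, -0.5)

# Scattered forward-mapped positions and the grid coordinates they map from
x_ = (x + delta_x).ravel()
y_ = (y + delta_y).ravel()

# Splines that return the source coordinate for a given target position
ip_x = SmoothBivariateSpline(x_, y_, x.ravel())
ip_y = SmoothBivariateSpline(x_, y_, y.ravel())

# Inverse field: source position minus target position on the target grid
inv_delta_x = ip_x(x.ravel(), y.ravel(), grid=False).reshape(shape) - x
inv_delta_y = ip_y(x.ravel(), y.ravel(), grid=False).reshape(shape) - y

# Away from the boundary, where the spline fit is well constrained, the
# recovered deltas should be close to (-1.5, +0.5)
print(inv_delta_x[20, 20], inv_delta_y[20, 20])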