Code Example #1
plt.imshow(mag1)

#plt.figure()
#plt.imshow(rho2) #, origin='lower')

meshfile = r"d:\msh3.txt"
densfile = r"d:\den3.txt"
magfile = r"d:\mag3.txt"
graoutfile = r"d:\gra3.dat"
magoutfile = r"d:\mag3.dat"
graoutfile1 = r"d:\gra3n.dat"
magoutfile1 = r"d:\mag3n.dat"
area = (-200, 200, -600, 600, 0, 600) #x y z
shape = (100, 150, 1) # z y x
mesh = PrismMesh(area, shape)
mesh.addprop('density', 1000.*rho1.ravel()-2650.0001)
mesh.addprop('magnetization', mag1.ravel())
mesh.dump(meshfile, densfile, 'density') # dump the mesh to disk for MeshTools3D visualization
mesh.dump(meshfile, magfile, 'magnetization')
# generate the kernel matrix
kernel=[] 
narea = (-500, 500,-1000, 1000) #y x
nshape = (20, 40)
xp, yp, zp = gridder.regular(narea, nshape, z=-1)
prisms=[]
for p in mesh:
    prisms.append(p)
print('kernel')
inc, dec = 30, -4
kernelgz = prism.gz_kernel(xp, yp, zp, prisms)
for i, layer in enumerate(mesh.layers()):
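The listing above breaks off inside the layer loop. As a hypothetical sketch (not part of the original file, and assuming matplotlib.pyplot, gridder and prism are imported earlier in it), the observation grid and prism list built above can already be used to forward-model the gravity anomaly with prism.gz, the routine the later examples on this page also call:

# Hypothetical continuation: forward-model gravity on the (xp, yp, zp) grid defined above.
gz = prism.gz(xp, yp, zp, prisms)   # reads each prism's 'density' prop; output in mGal
plt.figure()
plt.imshow(gz.reshape(nshape), origin='lower')
plt.colorbar(label='gz (mGal)')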
Code Example #2
File: matlab_bin.py Project: zhixin-xue/geoist
plt.figure()
plt.imshow(mag1)
#plt.figure()
#plt.imshow(rho2) #, origin='lower')

meshfile = r"d:\msh1.txt"
densfile = r"d:\den1.txt"
magfile = r"d:\mag1.txt"
graoutfile = r"d:\gra1.dat"
magoutfile = r"d:\mag1.dat"
graoutfile1 = r"d:\gra1n.dat"
magoutfile1 = r"d:\mag1n.dat"
area = (-100, 100, -750, 750, 0, 700)  #x y z
shape = (100, 150, 1)  # z y x
mesh = PrismMesh(area, shape)
mesh.addprop('density', 1000. * rho1.ravel() - 2529.99997)
mesh.addprop('magnetization', mag1.ravel())
mesh.dump(meshfile, densfile, 'density')
mesh.dump(meshfile, magfile, 'magnetization')  # dump the mesh to disk for MeshTools3D visualization
# generate the kernel matrix
kernel = []
narea = (-500, 500, -1000, 1000)  #y x
nshape = (20, 40)
xp, yp, zp = gridder.regular(narea, nshape, z=-1)
prisms = []
for p in mesh:
    prisms.append(p)
print('kernel')
inc, dec = 30, -4
kernelgz = prism.gz_kernel(xp, yp, zp, prisms)
for i, layer in enumerate(mesh.layers()):
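This copy of the script is also cut off before the gra1.dat/mag1.dat output paths declared above are used. A hypothetical write-out step (the three-column x, y, value layout is an assumption, and numpy is assumed to be imported as np earlier in matlab_bin.py):

# Hypothetical output step: save a forward-modelled field to the declared path.
gz = prism.gz(xp, yp, zp, prisms)
np.savetxt(graoutfile, np.column_stack((xp, yp, gz)))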
Code Example #3
File: pfm_inv3.py Project: zhixin-xue/geoist
from geoist.inversion.mesh import PrismMesh
from geoist.vis import giplt
from geoist.inversion.regularization import Smoothness,Damping,TotalVariation
from geoist.inversion.pfmodel import SmoothOperator
from geoist.inversion.hyper_param import LCurve
from geoist.pfm import inv3d

meshfile = r"d:\msh.txt"
densfile = r"d:\den.txt"
# generate the source mesh: NS 40 km, EW 80 km, 10 layers in z (10 x 20 x 5 = 1000 cells)
area = (-20000, 20000, -40000, 40000, 2000, 32000) #NS EW Down
shape = (10, 20, 5) #nz ny nx
mesh = PrismMesh(area, shape)
density = np.zeros(shape)
density[3:8, 9:12, 1:4] = 1.0  # z y x
mesh.addprop('density', density.ravel())
mesh.dump(meshfile, densfile, 'density') # dump the mesh to disk for MeshTools3D visualization
# generate the kernel matrix
kernel=[] 
narea = (-28750, 28750,-48750, 48750) #NS, EW
nshape = (30, 40) #NS, EW
depthz = []
xp, yp, zp = gridder.regular(narea, nshape, z=-1)
for i, layer in enumerate(mesh.layers()):
    for j, p in enumerate(layer):
        x1 = mesh.get_layer(i)[j].x1
        x2 = mesh.get_layer(i)[j].x2
        y1 = mesh.get_layer(i)[j].y1
        y2 = mesh.get_layer(i)[j].y2
        z1 = mesh.get_layer(i)[j].z1
        z2 = mesh.get_layer(i)[j].z2
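The loop is truncated here as well. Judging from the pattern completed in code example #4 below, a plausible continuation builds one geometry.Prism per cell and stacks its response into the kernel list declared above (this sketch assumes geoist.inversion.geometry and geoist.pfm.prism are imported earlier in pfm_inv3.py, and the depthz line is a guess at how that list is filled):

        # Hypothetical continuation of the loop body, one kernel column per cell:
        den = mesh.get_layer(i)[j].props
        cell = geometry.Prism(x1, x2, y1, y2, z1, z2, den)
        kernel.append(prism.gz(xp, yp, zp, [cell]))
    depthz.append((z1 + z2) / 2.0)  # hypothetical: record a representative depth per layer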
Code Example #4
File: pfm_mod_gen.py Project: Aicha-cher/geoist
"""
@author: chens
"""
# 3rd imports
import matplotlib.pyplot as plt
import numpy as np
#from io import StringIO
# local imports
from geoist import gridder
from geoist.inversion import geometry
from geoist.pfm import prism
from geoist.inversion.mesh import PrismMesh
from geoist.vis import giplt
meshfile = r"d:\msh.txt"  #StringIO()
densfile = r"d:\den.txt"  #StringIO()
mesh = PrismMesh((0, 10, 0, 20, 0, 5), (5, 2, 2))
mesh.addprop('density', 1000.0 * np.random.rand(20))
mesh.dump(meshfile, densfile, 'density')
#print(meshfile.getvalue().strip())
#print(densfile.getvalue().strip())
model = []
for i, layer in enumerate(mesh.layers()):
    for j, p in enumerate(layer):
        #print(i,j, p)
        x1 = mesh.get_layer(i)[j].x1
        x2 = mesh.get_layer(i)[j].x2
        y1 = mesh.get_layer(i)[j].y1
        y2 = mesh.get_layer(i)[j].y2
        z1 = mesh.get_layer(i)[j].z1
        z2 = mesh.get_layer(i)[j].z2
        den = mesh.get_layer(i)[j].props
        model.append(geometry.Prism(x1, x2, y1, y2, z1, z2, den))
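Example #4 stops after assembling the prism model. A short hypothetical follow-up (the observation grid size, height and plotting choices are assumptions) that forward-models and plots the field using only the modules already imported above:

# Hypothetical follow-up: forward-model gz of the random-density model and plot it.
obs_shape = (30, 30)                                  # assumed observation grid
xp, yp, zp = gridder.regular((0, 10, 0, 20), obs_shape, z=-1)
gz = prism.gz(xp, yp, zp, model)                      # reads each prism's 'density' prop
plt.figure()
plt.imshow(gz.reshape(obs_shape), origin='lower')
plt.colorbar(label='gz (mGal)')
plt.show()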
Code Example #5
class GravInvAbicModel:
    def __init__(self,
                 nzyx=[4, 4, 4],
                 smooth_components=None,
                 depth_constraint=None,
                 model_density=None,
                 refer_density=None,
                 weights=None,
                 source_volume=None,
                 smooth_on='m',
                 subtract_mean=True,
                 data_dir='/data/gravity_inversion'):
        self.gpu_id = 2
        self.subtract_mean = subtract_mean
        self._nz, self._ny, self._nx = nzyx
        self.smooth_on = smooth_on
        self.data_dir = data_dir
        self.gen_model_name()
        self.nobsx = nzyx[2]
        self.nobsy = nzyx[1]
        self.source_volume = source_volume
        if model_density is None:
            self._model_density = None
        else:
            self._model_density = model_density.ravel()
        self._smooth_components = smooth_components
        if smooth_components is None:
            self._smooth_components = (set(weights.keys()) -
                                       set(['depth', 'obs', 'bound', 'refer']))
        self.constraints = dict()
        self.constraints_val = dict()
        if depth_constraint is None:
            self.constraints['depth'] = np.ones(np.prod(nzyx))
            self.constraints_val['depth'] = None
        else:
            self.constraints['depth'] = (
                depth_constraint.reshape(-1, 1) * np.ones(
                    (1, self._nx * self._ny))).ravel()
            self.constraints_val['depth'] = 0
        if refer_density is None:
            self.constraints['refer'] = None
            self.constraints_val['refer'] = None
        else:
            self.constraints['refer'] = np.ones(self._nx * self._ny * self._nz)
            self.constraints_val['refer'] = refer_density.ravel()
        self._weights = weights
        if not 'depth' in self._weights.keys():
            self._weights['depth'] = 1.0
        self.smop = SmoothOperator()
        self.kernel_op = None
        self.abic_val = 0
        self.log_total_det_val = 0
        self.log_prior_det_val = 0
        self.log_obs_det_val = 0
        self.min_u_val = 0
        self.min_density = -1.0e4
        self.max_density = 1.0e4

    @property
    def source_volume(self):
        return self._source_volume

    @source_volume.setter
    def source_volume(self, value):
        self._source_volume = value
        self.gen_mesh()

    def gen_model_name(self):
        self.model_name = '{}x{}x{}'.format(self._nx, self._ny, self._nz)
        self.fname = pathlib.Path(
            self.data_dir) / pathlib.Path(self.model_name + '.h5')

    @property
    def weights(self):
        return self._weights

    @weights.setter
    def weights(self, values):
        self._weights = values
        if not self.kernel_op is None:
            self.kernel_op.weights = self._weights

    @property
    def smooth_components(self):
        return self._smooth_components

    @smooth_components.setter
    def smooth_components(self, values):
        self._smooth_components = values

    @property
    def refer_density(self):
        return self.constraints_val['refer'].reshape(self._nz, self._ny,
                                                     self._nx)

    @refer_density.setter
    def refer_density(self, value):
        self.constraints_val['refer'] = value.ravel()

    @property
    def nx(self):
        return self._nx

    @nx.setter
    def nx(self, value):
        self._nx = value
        self.nobsx = self._nx
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = (self.constraints['depth'].reshape(
                self._nz, -1)[:, 0].reshape(-1, 1) *
                np.ones((1, self._nx * self._ny)))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refer'] = np.ones(self._nx * self._ny * self._nz)

    @property
    def ny(self):
        return self._ny

    @ny.setter
    def ny(self, value):
        self._ny = value
        self.nobsy = self._ny
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = (self.constraints['depth'].reshape(
                self._nz, -1)[:, 0].reshape(-1, 1) *
                np.ones((1, self._nx * self._ny)))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refer'] = np.ones(self._nx * self._ny * self._nz)

    @property
    def nz(self):
        return self._nz

    @nz.setter
    def nz(self, value):
        self._nz = value
        self.gen_model_name()
        self.constraints['refer'] = np.ones(self._nx * self._ny * self._nz)
        print("Warning: nz changed. \nDon't forget setting depth constraints.")

    @property
    def model_density(self):
        return (self._model_density.reshape(self.nz, self.ny, self.nx))

    @model_density.setter
    def model_density(self, value):
        self._model_density = value.ravel()

    def gen_mesh(self, height=-1):
        shape = (self._nz, self._ny, self._nx)
        self.mesh = PrismMesh(self._source_volume, shape)
        density = np.ones(shape) * 1.0e3
        self.mesh.addprop('density', density.ravel())
        # generate obs grid
        # coordinate: x North-South,y East-West
        # gridder is in the order: (nx,ny)
        self.obs_area = (self._source_volume[0] + 0.5 * self.mesh.dims[0],
                         self._source_volume[1] - 0.5 * self.mesh.dims[0],
                         self._source_volume[2] + 0.5 * self.mesh.dims[1],
                         self._source_volume[3] - 0.5 * self.mesh.dims[1])
        obs_shape = (self.nobsx, self.nobsy)
        self.xp, self.yp, self.zp = gridder.regular(self.obs_area,
                                                    obs_shape,
                                                    z=height)

    def _gen_walsh_matrix(self):
        print('generating walsh_matrix')
        if os.path.exists(self.fname):
            with h5py.File(self.fname, mode='r') as f:
                if not 'walsh_matrix' in f.keys():
                    have_walsh_matrix = False
                else:
                    have_walsh_matrix = True
        else:
            have_walsh_matrix = False
        if have_walsh_matrix:
            return
        walsh_matrix = walsh.walsh_matrix(self._nx * self._ny * self._nz,
                                          normalized=True,
                                          ordering='sequence2',
                                          nxyz=(self._nx, self._ny, self._nz))
        walsh_matrix = walsh_matrix.astype(np.float32)
        step = self._nx * self._ny * self._nz // 4
        components = ['0', '1', '2', '3']
        with h5py.File(self.fname, mode='a') as f:
            fgroup = f.create_group('walsh_matrix')
            for i in range(4):
                fgroup.create_dataset(components[i],
                                      data=walsh_matrix[i * step:(i + 1) *
                                                        step, :])

    def gen_kernel(self, process=1):
        def calc_kernel(i):
            return prism.gz(self.xp[0:1], self.yp[0:1], self.zp[0:1],
                            [self.mesh[i]])

        if process > 1:  # Windows multiprocessing may raise errors.
            print('Number of process:', process)
            with Pool(processes=process) as pool:
                kernel0 = pool.map(calc_kernel, range(len(self.mesh)))
        else:
            kernel0 = [calc_kernel(i) for i in range(len(self.mesh))]

        self.kernel0 = np.array(kernel0).reshape(self.nz, self.ny, self.nx)
        self.kernel_op = AbicLSQOperator(
            self.kernel0,
            depth_constraint=self.constraints['depth'],
            smooth_components=self._smooth_components,
            refer_constraint=self.constraints['refer'],
            weights=self._weights)
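    # Note: calc_kernel above only evaluates the response of every cell at the first
    # observation point (xp[0], yp[0], zp[0]). As the gen_kernel docstring in code
    # example #6 below explains, the full kernel is a multilevel Toeplitz matrix, so
    # this single first row is enough for AbicLSQOperator to apply the whole
    # operator through gtoep.matvec / gtoep.rmatvec.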

    def _diagvec(self, vec=None, diag=None):
        if vec.ndim == 1:
            return vec * diag
        else:
            return vec * diag.reshape(1, -1)

    @timeit
    def walsh_transform(self, keys=None):
        if keys is None:
            keys = ['kernel'] + list(self.constraints.keys()) + list(
                self._smooth_components)
        else:
            keys = keys

        if use_gpu > 0:
            import cupy as cp

        is_stored = dict()
        for key in keys:
            is_stored[key] = False
        if os.path.exists(self.fname):
            with h5py.File(self.fname, mode='r') as f:
                for key in keys:
                    try:
                        if '3' in f[key].keys():
                            is_stored[key] = True
                        if key == 'depth':
                            res = f['depth'][
                                'constraint'][:] - self.constraints['depth']
                            res = np.linalg.norm(res) / np.linalg.norm(
                                self.constraints['depth'])
                            if res > 1.0e-3:
                                is_stored[key] = False
                        if key == 'kernel':
                            res = f['kernel']['source_volume'][:] - np.array(
                                self.source_volume)
                            res = np.linalg.norm(res) / np.linalg.norm(
                                np.array(self.source_volume))
                            if res > 1.0e-3:
                                is_stored[key] = False
                    except KeyError:
                        continue
        self._gen_walsh_matrix()
        logn = int(np.ceil(np.log2(self._nx * self._ny * self._nz)))
        norm_walsh = 1. / (np.sqrt(2)**logn)
        blocks = ['0', '1', '2', '3']
        matvec_op = {
            'kernel':
            self.kernel_op.gtoep.matvec,
            'depth':
            lambda x: self._diagvec(x, diag=np.sqrt(self.constraints['depth']))
        }
        for key in self._smooth_components:
            matvec_op[key] = lambda x, key=key: self.smop.derivation(
                x.reshape(-1, self.nz, self.ny, self.nx),
                component=key).reshape(x.shape[0], -1)
        is_stored['refer'] = True
        for key in keys:
            if is_stored[key]:
                print('walsh transformation of {} already exists.'.format(key))
                continue
            print('performing walsh transformation on {}.'.format(key))
            step = self.nx * self.ny * self.nz // 4
            if key == 'depth':
                step = self._nz
            with h5py.File(self.fname, mode='a') as f:
                try:
                    del f[key]
                except KeyError:
                    pass
                dxyz_group = f.create_group(key)
                walsh_group = f['walsh_matrix']
                for i in range(4):
                    print("\t progress {}/4".format(i))
                    part_walsh = walsh_group[blocks[i]][:]
                    if key == 'depth':
                        part_walsh = walsh_group[blocks[i]][:self._nz]
                    part_walsh = matvec_op[key](part_walsh)

                    if use_gpu > 0:
                        with cp.cuda.Device(self.gpu_id):
                            res = cp.zeros((step, step))
                            j = 0
                            while j * step < part_walsh.shape[1]:
                                tmp_block_gpu = cp.asarray(
                                    part_walsh[:, j * step:(j + 1) * step])
                                res += tmp_block_gpu @ tmp_block_gpu.T
                                j += 1
                            res = cp.asnumpy(res)
                            if key in self._smooth_components:
                                res[np.abs(res) < 1.0e-1 * norm_walsh] = 0.
                            tmp_block_gpu = None
                            mempool = cp.get_default_memory_pool()
                            pinned_mempool = cp.get_default_pinned_memory_pool(
                            )
                            mempool.free_all_blocks()
                            pinned_mempool.free_all_blocks()
                    else:
                        res = np.zeros((step, step))
                        j = 0
                        while j * step < part_walsh.shape[1]:
                            tmp_block_gpu = np.asarray(
                                part_walsh[:, j * step:(j + 1) * step])
                            res += tmp_block_gpu @ tmp_block_gpu.T
                            j += 1
                        if key in self._smooth_components:
                            res[np.abs(res) < 1.0e-1 * norm_walsh] = 0.

                    dxyz_group.create_dataset(blocks[i], data=res)
        if ('depth' in keys) and (not is_stored['depth']):
            with h5py.File(self.fname, mode='a') as f:
                try:
                    del f['depth']['constraint']
                except KeyError:
                    pass
                dxyz_group = f['depth']
                dxyz_group.create_dataset('constraint',
                                          data=self.constraints['depth'])
        if ('kernel' in keys) and (not is_stored['kernel']):
            with h5py.File(self.fname, mode='a') as f:
                try:
                    del f['kernel']['source_volume']
                except KeyError:
                    pass
                dxyz_group = f['kernel']
                dxyz_group.create_dataset('source_volume',
                                          data=np.array(self._source_volume))

    @property
    def depth_constraint(self):
        return (self.constraints['depth'].reshape(self._nz, -1)[:, 0])

    @depth_constraint.setter
    def depth_constraint(self, value):
        self.constraints['depth'] = (value.reshape(-1, 1) * np.ones(
            (1, self._nx * self._ny))).ravel()

    @timeit
    def forward(self, model_density=None):
        if model_density is None:
            model_density = self._model_density
        else:
            model_density = model_density.ravel()
        self.obs_data = self.kernel_op.gtoep.matvec(model_density)

    def _gen_rhs(self):
        self.rhs = self._weights['obs'] * self.kernel_op.gtoep.rmatvec(
            self.obs_data)
        if self.constraints_val['refer'] is not None:
            v = self.constraints['depth'] * self.constraints_val['refer']
        if 'refer' in self._weights.keys():
            self.rhs += (self._weights['refer'] * self._weights['depth'] *
                         self.constraints['depth'] * v)
        if self.smooth_on == 'm-m0':
            for key in self._smooth_components:
                tmp2 = v.reshape(-1, self._nz, self._ny, self._nx)
                tmp2 = self.smop.derivation(tmp2, component=key)
                tmp2 = self.smop.rderivation(tmp2, component=key)
                if v.ndim == 1:
                    self.rhs += self._weights[key] * self._weights[
                        'depth'] * self.constraints['depth'] * tmp2.ravel()
                else:
                    self.rhs += self._weights[key] * self._weights[
                        'depth'] * self.constraints['depth'] * tmp2.reshape(
                            v.shape[0], -1)

    @timeit
    def do_linear_solve(self):
        self.do_linear_solve_quiet()

    def do_linear_solve_quiet(self):
        self._gen_rhs()
        if self.subtract_mean:
            sum_obs = np.sum(self.obs_data)
            tmp_b = np.zeros(len(self.rhs) +
                             1)  #np.zeros(len(self.rhs+1)) #chenshi
            tmp_b[:-1] = self.rhs
            tmp_b[-1] = sum_obs
            tmp_op = AbicLSQOperator2(self.kernel_op)
            self.solution = spsparse.linalg.cg(tmp_op, tmp_b, tol=1.0e-5)[0]
        else:
            self.solution = spsparse.linalg.cg(self.kernel_op,
                                               self.rhs,
                                               tol=1.0e-5)[0]

    @timeit
    def calc_u(self, solved=False, x=None):
        return self.calc_u_quiet(solved, x)

    @timeit
    def calc_min_u(self, solved=False, x=None):
        return self.calc_u_quiet(solved, x)

    def calc_u_quiet(self, solved=False, x=None):
        if x is None:
            if not solved:
                self.do_linear_solve_quiet()
            x = self.solution
        self.min_u_val = self._weights['obs'] * np.linalg.norm(
            self.kernel_op.gtoep.matvec(x) - self.obs_data)**2
        if ('refer' in self._weights.keys()) and (self.smooth_on == 'm-m0'):
            v = x - self.constraints_val['refer']
        else:
            v = x
        if 'depth' in self._weights.keys():
            v = np.sqrt(self._weights['depth']) * self.constraints['depth'] * v
        for key in self._smooth_components:
            tmp2 = self.smop.derivation(v.reshape(self._nz, self._ny,
                                                  self._nx),
                                        component=key)
            self.min_u_val += self._weights[key] * np.linalg.norm(
                tmp2.ravel())**2
        if 'refer' in self._weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = np.sqrt(
                    self._weights['depth']) * self.constraints['depth'] * v
            self.min_u_val += self._weights['refer'] * np.linalg.norm(v)**2
        return self.min_u_val

    def jac_u(self, x=None):
        res = self.kernel_op.matvec(x) - self.rhs
        return 2. * res

    def hessp_u(self, x, v):
        res = self.kernel_op.matvec(v)
        return 2. * res

    @timeit
    def bound_optimize(self, x0=None):
        density_bounds = Bounds(self.min_density, self.max_density)
        if x0 is None:
            x0 = np.zeros(
                self._nx * self._ny *
                self._nz) + (self.max_density - self.min_density) / 2.

        self.bound_solution = minimize(
            lambda x: self.calc_u_quiet(solved=True, x=x),
            x0,
            method='trust-constr',
            jac=self.jac_u,
            hessp=self.hessp_u,
            bounds=density_bounds,
        )

    def lasso_target(self, x):
        #        self.min_u_val = self._weights['obs']*np.linalg.norm(self.kernel_op.gtoep.matvec(x) - self.obs_data)**2
        self.min_u_val = self._weights['obs'] * np.linalg.norm(
            self.kernel_op.gtoep.matvec(x) - self.obs_data)
        if ('refer' in self._weights.keys()) and (self.smooth_on == 'm-m0'):
            v = x - self.constraints_val['refer']
        else:
            v = x
        if 'depth' in self._weights.keys():
            v = np.sqrt(self._weights['depth']) * self.constraints['depth'] * v
        for key in self._smooth_components:
            tmp2 = self.smop.derivation(v.reshape(self._nz, self._ny,
                                                  self._nx),
                                        component=key)
            self.min_u_val += self._weights[key] * np.linalg.norm(tmp2.ravel())
        if 'refer' in self._weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = np.sqrt(
                    self._weights['depth']) * self.constraints['depth'] * v
            self.min_u_val += self._weights['refer'] * np.linalg.norm(v)
        return self.min_u_val

    def lasso_jac(self, x):
        #        jac = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(x)) - self.kernel_op.gtoep.rmatvec(self.obs_data)
        #        jac = 2.0*self._weights['obs']*jac
        jac = self.kernel_op.gtoep.rmatvec(
            self.kernel_op.gtoep.matvec(x)) - self.kernel_op.gtoep.rmatvec(
                self.obs_data)
        jac = self._weights['obs'] * jac
        norm_res = np.linalg.norm(self.obs_data -
                                  self.kernel_op.gtoep.matvec(x))
        jac = jac / norm_res
        if 'refer' in self.weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = self.constraints['depth'] * v
            norm_refer = np.linalg.norm(v)
            jac += self.weights['refer'] * self.weights[
                'depth'] * self.constraints['depth'] * v / norm_refer

        for key in self.smooth_components:
            v = x
            if 'depth' in self._weights.keys():
                v = self.constraints['depth'] * v
            tmp2 = self.smop.derivation(v.reshape(self._nz, self._ny,
                                                  self._nx),
                                        component=key)
            smooth_norm = np.linalg.norm(tmp2.ravel())
            tmp2 = self.smop.rderivation(tmp2, component=key)
            jac += self.weights[key] * self.weights[
                'depth'] * self.constraints['depth'] * tmp2.ravel(
                ) / smooth_norm
        return jac

    def lasso_hessp(self, x, v):
        #        res = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(v))
        #        res = 2.0*self._weights['obs']*res
        norm_res = np.linalg.norm(self.obs_data -
                                  self.kernel_op.gtoep.matvec(x))
        res = self.kernel_op.gtoep.rmatvec(
            self.kernel_op.gtoep.matvec(v)) / norm_res
        gradient_res = (
            self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(x)) -
            self.kernel_op.gtoep.rmatvec(self.obs_data))
        res -= np.dot(gradient_res, v) / norm_res**3 * gradient_res
        res *= self._weights['obs']
        if 'refer' in self.weights.keys():
            v2 = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v2 = self.constraints['depth'] * v2
            norm_refer = np.linalg.norm(v2)
            res += self.weights['refer'] * self.weights[
                'depth'] / norm_refer * self.constraints['depth'] * v
            grad_ref = self.constraints['depth'] * v2
            res -= (self.weights['refer'] * self.weights['depth'] *
                    (np.dot(v, grad_ref) / norm_refer**3 * grad_ref))
        for key in self.smooth_components:
            v2 = x
            if 'depth' in self._weights.keys():
                v2 = self.constraints['depth'] * v2
            tmp2 = self.smop.derivation(v2.reshape(self._nz, self._ny,
                                                   self._nx),
                                        component=key)
            smooth_norm = np.linalg.norm(tmp2.ravel())
            tmp2 = self.smop.rderivation(tmp2, component=key)
            grad_sm = self.constraints['depth'] * tmp2.ravel()

            tmp2 = v.reshape(self.nz, self.ny, self.nx)
            tmp2 = self.smop.derivation(tmp2, component=key)
            tmp2 = self.smop.rderivation(tmp2, component=key)
            tmp2 = self.constraints['depth'] * tmp2.ravel()
            res += (self._weights['depth'] * self._weights[key] / smooth_norm *
                    (tmp2 - np.dot(v, grad_sm) * grad_sm / smooth_norm**2))
        return res

    @timeit
    def lasso_optimize(self, x0=None):
        density_bounds = Bounds(self.min_density, self.max_density)
        if x0 is None:
            x0 = (np.random.rand(self._nx * self._ny * self._nz) *
                  (self.max_density - self.min_density) / 2. +
                  (self.max_density + self.min_density) / 2.)
        self.bound_solution = minimize(self.lasso_target,
                                       x0,
                                       method='trust-constr',
                                       jac=self.lasso_jac,
                                       hessp=self.lasso_hessp,
                                       bounds=density_bounds)

    def calc_res(self):
        self.residuals = dict()
        self.stds = dict()
        self.residuals['obs'] = np.linalg.norm(
            self.kernel_op.gtoep.matvec(self.solution) - self.obs_data)**2
        self.stds['obs'] = np.std(
            self.kernel_op.gtoep.matvec(self.solution) - self.obs_data)
        for key in self._smooth_components:
            tmp2 = self.solution.reshape(self._nz, self._ny, self._nx)
            if ('refer' in self.constraints_val.keys()) and (self.smooth_on
                                                             == 'm-m0'):
                tmp2 -= self.constraints_val['refer'].reshape(
                    self._nz, self._ny, self._nx)
            tmp2 = self.smop.derivation(tmp2, component=key)
            self.residuals[key] = np.linalg.norm(tmp2.ravel())**2
            self.stds[key] = np.std(tmp2.ravel())
        if 'refer' in self.constraints_val.keys():
            self.residuals['refer'] = np.linalg.norm(
                self.solution.ravel() -
                self.constraints_val['refer'].ravel())**2
            self.stds['refer'] = np.std(self.solution.ravel() -
                                        self.constraints_val['refer'].ravel())

    def calc_log_prior_total_det_quiet(self):
        self.log_prior_det_val = 0
        self.log_total_det_val = 0
        blocks = ['0', '1', '2', '3']
        prior_eigs = np.zeros(self._nx * self._ny * self._nz)
        total_eigs = np.zeros(self._nx * self._ny * self._nz)
        step = self._nx * self._ny * self._nz // 4
        try:
            depth_weight = self._weights['depth']
        except KeyError:
            depth_weight = 1.
        with h5py.File(self.fname, mode='r') as f:
            if 'depth' in self._weights.keys():
                depth_walsh = f['depth']['0'][:]
            for i_b, block in enumerate(blocks):
                tmp_block = np.zeros((step, step))
                for dxyz_name in self._smooth_components:
                    try:
                        dxyz_walsh = f[dxyz_name][block][:].reshape(
                            step // self._nz, self._nz, step // self._nz,
                            self._nz)
                        ein_path = np.einsum_path('mi,xiyj,jn->xmyn',
                                                  depth_walsh.T,
                                                  dxyz_walsh,
                                                  depth_walsh,
                                                  optimize='optimal')[0]
                        tmp_multi = np.einsum('mi,xiyj,jn->xmyn',
                                              depth_walsh.T,
                                              dxyz_walsh,
                                              depth_walsh,
                                              optimize=ein_path)
                        tmp_block += depth_weight * self._weights[
                            dxyz_name] * tmp_multi.reshape(step, step)
                    except KeyError:
                        pass
                if 'refer' in self._weights.keys():
                    tmp_multi_small = depth_walsh.T @ depth_walsh
                    for i in range(step // self._nz):
                        tmp_block[i * self._nz:(i + 1) * self._nz,
                                  i * self._nz:(i + 1) *
                                  self._nz] += depth_weight * self._weights[
                                      'refer'] * tmp_multi_small
                if use_gpu > 0:
                    import cupy as cp
                    with cp.cuda.Device(self.gpu_id):
                        tmp_block_gpu = cp.asarray(tmp_block, dtype=np.float32)
                        eigs = cp.linalg.eigvalsh(tmp_block_gpu)
                        prior_eigs[i_b * step:(i_b + 1) *
                                   step] = cp.asnumpy(eigs)
                        self.log_prior_det_val += cp.asnumpy(
                            cp.sum(cp.log(eigs)))
                        tmp_block_gpu = None
                        eigs = None
                        free_gpu()
                    tmp_block += self._weights['obs'] * f['kernel'][block][:]
                    with cp.cuda.Device(self.gpu_id):
                        tmp_block_gpu = cp.asarray(tmp_block, dtype=np.float32)
                        eigs = cp.linalg.eigvalsh(tmp_block_gpu)
                        total_eigs[i_b * step:(i_b + 1) *
                                   step] = cp.asnumpy(eigs)
                        self.log_total_det_val += cp.asnumpy(
                            cp.sum(cp.log(eigs)))
                        tmp_block_gpu = None
                        eigs = None
                        free_gpu()
                else:
                    tmp_block_gpu = np.asarray(tmp_block, dtype=np.float32)
                    eigs = np.linalg.eigvalsh(tmp_block_gpu)
                    prior_eigs[i_b * step:(i_b + 1) * step] = eigs
                    self.log_prior_det_val += np.sum(np.log(eigs))
                    tmp_block_gpu = None
                    eigs = None
                    tmp_block += self._weights['obs'] * f['kernel'][block][:]
                    tmp_block_gpu = np.asarray(tmp_block, dtype=np.float32)
                    eigs = np.linalg.eigvalsh(tmp_block_gpu)
                    total_eigs[i_b * step:(i_b + 1) * step] = eigs
                    self.log_total_det_val += np.sum(np.log(eigs))
                    tmp_block_gpu = None
                    eigs = None

        if use_gpu > 0:
            self.log_prior_det_val = cp.asnumpy(self.log_prior_det_val)
            self.log_total_det_val = cp.asnumpy(self.log_total_det_val)

        self.eigs = {'prior': prior_eigs, 'total': total_eigs}
        return self.log_prior_det_val, self.log_total_det_val

    @timeit
    def calc_log_prior_total_det(self):
        return self.calc_log_prior_total_det_quiet()

    def calc_log_obs_det_quiet(self):
        self.log_obs_det_val = np.log(self._weights['obs']) * len(
            self.obs_data)
        return self.log_obs_det_val

    @timeit
    def calc_log_obs_det(self):
        return self.calc_log_obs_det_quiet()

    @timeit
    def calc_abic(self):
        '''-log_prior_det_value+log_total_det-log_obs_det+min_u'''
        self.calc_log_prior_total_det()
        self.calc_u()
        self.calc_log_obs_det()
        self.abic_val = (self.log_total_det_val + self.min_u_val -
                         self.log_prior_det_val - self.log_obs_det_val)
        return self.abic_val

    def calc_abic_quiet(self):
        '''-log_prior_det_value+log_total_det-log_obs_det+min_u'''
        self.calc_log_prior_total_det_quiet()
        self.calc_u_quiet()
        self.calc_log_obs_det_quiet()
        self.abic_val = (self.log_total_det_val + self.min_u_val -
                         self.log_prior_det_val - self.log_obs_det_val)
        return self.abic_val

    def _abic_optimize_exp(self):
        #optimize_keys = list(set(self._weights.keys())-set(['depth']))
        optimize_keys = list(self._weights.keys())

        def abic_target(x):
            for i, key in enumerate(optimize_keys):
                self._weights[key] = np.exp(x[i])
            return self.calc_abic_quiet()

        x0 = np.zeros(len(self._weights))
        for i, key in enumerate(optimize_keys):
            x0[i] = np.log(self._weights[key])
        self.abic_optimize_summary = minimize(abic_target,
                                              x0,
                                              method='Nelder-Mead')

    def _abic_optimize_bound(self):
        optimize_keys = list(self._weights.keys())

        def abic_target(x):
            for i, key in enumerate(optimize_keys):
                self._weights[key] = x[i]
            return self.calc_abic_quiet()

        x0 = np.zeros(len(self._weights))
        for i, key in enumerate(optimize_keys):
            x0[i] = self._weights[key]
        weight_constraint = LinearConstraint(np.eye(len(optimize_keys)), 0.,
                                             np.inf)
        self.abic_optimize_summary = minimize(abic_target,
                                              x0,
                                              method='COBYLA',
                                              constraints=weight_constraint)

    @timeit
    def abic_optimize(self):
        self._abic_optimize_bound()

    @timeit
    def para_grad(self, x):
        pass

    def u_bound(self):
        pass

    def print_summary(self):
        print('abic values:{}'.format(self.abic_val))
        print('log total det:{}'.format(self.log_total_det_val))
        print('log prior det:{}'.format(self.log_prior_det_val))
        print('log obs det:{}'.format(self.log_obs_det_val))
        print('min u:{}'.format(self.min_u_val))
        print('std:', end=' ')
        print(self.stds)
        print('1/var:', end=' ')
        print({k: 1. / v**2 for k, v in self.stds.items()})
        print('norms:', end=' ')
        print(self.residuals)
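The class in example #5 is only defined, never driven, in this excerpt. A hypothetical driver script (model size, weight values, the smooth-component names 'dyy' and 'dzz', and the call order are assumptions; 'dxx' follows the smooth_components docstring in example #6 below) might look like:

# Hypothetical usage sketch for GravInvAbicModel; not part of the original file.
import numpy as np

nzyx = [4, 8, 8]                                    # assumed model size (nz, ny, nx)
weights = {'obs': 1.0, 'depth': 1.0,                # smooth-component keys are inferred
           'dxx': 1.0, 'dyy': 1.0, 'dzz': 1.0}      # from the __init__ logic above
true_density = np.zeros(nzyx)
true_density[1:3, 3:5, 3:5] = 1000.0                # a small dense block, SI units

inv = GravInvAbicModel(nzyx=nzyx,
                       source_volume=(-20000, 20000, -40000, 40000, 2000, 32000),
                       model_density=true_density,
                       weights=weights,
                       data_dir='.')
inv.gen_kernel()          # first row of the Toeplitz kernel
inv.walsh_transform()     # cache Walsh-transformed blocks in <model_name>.h5
inv.forward()             # synthetic observations from model_density
inv.do_linear_solve()     # conjugate-gradient least-squares solution
inv.calc_abic()           # fills min_u, the log-determinants and abic_val
inv.calc_res()            # residual norms / stds used by print_summary
inv.print_summary()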
Code Example #6
File: abic.py Project: zhixin-xue/geoist
class GravInvAbicModel:
    def __init__(self,conf_file=None,**kwargs):
        self.confs = {'nzyx':[4,4,4],
                      'gpu_id':2,
                      'smooth_components':None,
                      'depth_constraint':None,
                      'model_density':None,
                      'refer_densities':None,
                      'weights':None,
                      'source_volume':None,
                      'smooth_on':'m',
                      'subtract_mean':False,
                      'optimize_keys':None,
                      'mode':'walsh',
                      'data_dir':'/data/gravity_inversion'}
        confs = dict()
        if not conf_file is None:
            with open(conf_file) as f:
                confs = json.load(f)
        self.confs = {**self.confs,**confs,**kwargs}
        self._nz,self._ny,self._nx = self.confs['nzyx']
        self.nobsx = self._nx
        self.nobsy = self._ny
        self.gen_model_name()
        self.source_volume = self.confs['source_volume']
        if self.confs['model_density'] is None:
            self._model_density = None
        else:
            self._model_density = self.confs['model_density'].ravel()
        self._smooth_components = self.confs['smooth_components']
        if self.confs['smooth_components'] is None:
            self._smooth_components = (set(self.confs['weights'].keys()) - set(['depth',
                                                                  'obs',
                                                                  'bound',
                                                                  'refers']))
        self.constraints = dict()
        self.constraints_val = dict()
        if self.confs['depth_constraint'] is None:
            self.constraints['depth'] = np.ones(np.prod(self.nzyx))
            self.constraints_val['depth'] = None
        else:
            self.constraints['depth'] = (self.confs['depth_constraint'].reshape(-1,1)*np.ones((1,self._nx*self._ny))).ravel()
            self.constraints_val['depth'] = 0
        if self.confs['refer_densities'] is None:
            self.constraints['refers'] = None
            self.constraints_val['refers'] = None
        else:
            self.constraints['refers'] = np.ones(self._nx*self._ny*self._nz)
            self.refer_densities = self.confs['refer_densities']
            self.n_refer = len(self.confs['refer_densities'])
        self._weights = self.confs['weights']
        if not 'depth' in self._weights.keys():
            self._weights['depth'] = 1.0
        self.smop = SmoothOperator()
        self.kernel_op = None
        self.abic_val = 0
        self.log_total_det_val = 0
        self.log_prior_det_val = 0
        self.log_obs_det_val = 0
        self.min_u_val = 0
        self.min_density = -1.0e4
        self.max_density = 1.0e4
        self.optimize_log = {'parameters':[],'abic_vals':[]}

    def gen_model_name(self):
        '''generate a file name to save data of current model. The model will be
        saved in ``self.data_dir`` directory.
        '''
        self.model_name = '{}x{}x{}'.format(self._nx,self._ny,self._nz)
        self.fname = pathlib.Path(self.data_dir)/pathlib.Path(self.model_name+'.h5')

    @property
    def gpuid(self):
        '''Which GPU card will be used. Ignored if ``use_gpu`` is set to 0.
        '''
        return self.confs['gpu_id']
    @gpuid.setter
    def gpuid(self,values):
        self.confs['gpu_id'] = values

    @property
    def smooth_on(self):
        '''Which variable should be smoothed. Only ``'m'``
        is supported right now, which means smooth on density.
        '''
        return self.confs['smooth_on']
    @smooth_on.setter
    def smooth_on(self,values):
        self.confs['smooth_on'] = values

    @property
    def mode(self):
        '''How to calculate determinants. Could be 'naive' or 'walsh'. '''
        return self.confs['mode']
    @mode.setter
    def mode(self,values):
        self.confs['mode'] = values

    @property
    def data_dir(self):
        '''Specify a path to save model data.
        '''
        return self.confs['data_dir']
    @data_dir.setter
    def data_dir(self,values):
        self.confs['data_dir'] = values

    @property
    def source_volume(self):
        ''' The extent of source volume, in the form of ``[xmin,xmax,ymin,ymax,zmin,zmax]``.
        '''
        return self.confs['source_volume']
    @source_volume.setter
    def source_volume(self,value):
        self._source_volume = value
        self.confs['source_volume'] = value
        self.gen_mesh()

    @property
    def subtract_mean(self):
        return self.confs['subtract_mean']
    @subtract_mean.setter
    def subtract_mean(self,values):
        self.confs['subtract_mean'] = values

    @property
    def weights(self):
        '''Inverse variance of each distribution.
        '''
        return self._weights
    @weights.setter
    def weights(self,values):
        self._weights = values
        self.confs['weights'] = values
        if not self.kernel_op is None:
            self.kernel_op.weights = self._weights

    @property
    def optimize_keys(self):
        '''Keys of the weights to be optimized.
        '''
        return self.confs['optimize_keys']
    @optimize_keys.setter
    def optimize_keys(self,values):
        self.confs['optimize_keys'] = values

    @property
    def smooth_components(self):
        ''' partial derivatives used as smooth components.
        Example: ``'dxx'`` means :math:`\frac{\partial^2 m}{\partial x^2}`
        '''
        return self._smooth_components
    @smooth_components.setter
    def smooth_components(self,values):
        self._smooth_components = values
        self.confs['smooth_components'] = values


    @property
    def refer_densities(self):
        '''Reference densities: a list of density models, each with the same length
        as the raveled model density.
        '''
        tmp = []
        for density in self.constraints_val['refers']:
            tmp.append(density.reshape(self._nz,self._ny,self._nx))
        return tmp
    @refer_densities.setter
    def refer_densities(self,value):
        tmp = []
        for density in value:
            tmp.append(density.ravel())
        self.constraints_val['refers'] = tmp

    @property
    def nzyx(self):
        '''model dimension, with the form of ``[nz,ny,nx]``
        '''
        return self.confs['nzyx']
    @nzyx.setter
    def nzyx(self,values):
        self.confs['nzyx'] = values
        self._nz,self._ny,self._nx = values
        self.nobsx = values[2]
        self.nobsy = values[1]
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = self.constraints['depth'].reshape(self._nz,-1)[:,0].reshape(-1,1)*np.ones((1,self._nx*self._ny))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refers'] = np.ones(self._nx*self._ny*self._nz)

    @property
    def nx(self):
        ''' Number of cells along x-axis.
        '''
        return self._nx
    @nx.setter
    def nx(self,value):
        self._nx = value
        self.nobsx = self._nx
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = self.constraints['depth'].reshape(self._nz,-1)[:,0].reshape(-1,1)*np.ones((1,self._nx*self._ny))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refers'] = np.ones(self._nx*self._ny*self._nz)

    @property
    def ny(self):
        ''' Number of cells along y-axis.
        '''
        return self._ny
    @ny.setter
    def ny(self,value):
        self._ny = value
        self.nobsy = self._ny
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = self.constraints['depth'].reshape(self._nz,-1)[:,0].reshape(-1,1)*np.ones((1,self._nx*self._ny))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refers'] = np.ones(self._nx*self._ny*self._nz)

    @property
    def nz(self):
        ''' Number of cells along z-axis.
        '''
        return self._nz
    @nz.setter
    def nz(self,value):
        self._nz = value
        self.gen_model_name()
        self.constraints['refers'] = np.ones(self._nx*self._ny*self._nz)
        print("Warning: nz changed. \nDon't forget setting depth constraints.")

    @property
    def model_density(self):
        '''This vector is used for calculating the gravity field, i.e. forward modelling.
        '''
        return(self._model_density.reshape(self.nz,self.ny,self.nx))
    @model_density.setter
    def model_density(self,value):
        self._model_density = value.ravel()
        self.confs['model_density'] = self._model_density

    def gen_mesh(self,height = -1):
        ''' Generate mesh of the model.
        Args:
            height (float): height of the observations.
        '''
        shape = (self._nz, self._ny, self._nx)
        self.mesh = PrismMesh(self._source_volume, shape)
        density = np.ones(shape)*1.0e3
        self.mesh.addprop('density', density.ravel())
        # generate obs grid
        # coordinate: x North-South,y East-West
        # gridder is in the order: (nx,ny)
        self.obs_area = (self._source_volume[0]+0.5*self.mesh.dims[0],
                         self._source_volume[1]-0.5*self.mesh.dims[0],
                         self._source_volume[2]+0.5*self.mesh.dims[1],
                         self._source_volume[3]-0.5*self.mesh.dims[1])
        obs_shape = (self.nobsx, self.nobsy)
        self.xp, self.yp, self.zp = gridder.regular(self.obs_area, obs_shape, z=height)

    def _gen_walsh_matrix(self):
        '''generate walsh matrix in the order of sequence2.
        '''
        print('generating walsh_matrix')
        if os.path.exists(self.fname):
            with h5py.File(self.fname,mode='r') as f:
                if not 'walsh_matrix' in f.keys():
                    have_walsh_matrix = False
                else:
                    have_walsh_matrix = True
        else:
            have_walsh_matrix = False
        if have_walsh_matrix:
            return
        walsh_matrix = walsh.walsh_matrix(self._nx*self._ny*self._nz,
                                          normalized=True,
                                          ordering='sequence2',
                                          nxyz=(self._nx,self._ny,self._nz))
        walsh_matrix = walsh_matrix.astype(np.float64)
        step = self._nx*self._ny*self._nz//4
        components = ['0','1','2','3']
        with h5py.File(self.fname,mode='a') as f:
            fgroup = f.create_group('walsh_matrix')
            for i in range(4):
                fgroup.create_dataset(components[i],data=walsh_matrix[i*step:(i+1)*step,:])

    def gen_kernel(self, process = 1):
        '''generate kernel matrix. Because it is a multilevel Toeplitz matrix, only
        the first row is needed (i.e. the contribution of every source cell to the
        first observation position).
        .. note:: The coordinate system of the input parameters is
            x -> North, y -> East and z -> **DOWN**.
        .. note:: All input values are in **SI** units(!) and output is in **mGal**!
        '''
        def calc_kernel(i):
            return prism.gz(self.xp[0:1],self.yp[0:1],self.zp[0:1],[self.mesh[i]])
        if process > 1: # Windows multiprocessing may raise errors.
            print('Number of process:',process)
            with Pool(processes=process) as pool:
                kernel0 = pool.map(calc_kernel,range(len(self.mesh)))
        else:
            kernel0 = [calc_kernel(i) for i in range(len(self.mesh))]

        self.kernel0 = np.array(kernel0).reshape(self.nz,self.ny,self.nx)
        self.kernel_op = AbicLSQOperator(self.kernel0,
                                         depth_constraint=self.constraints['depth'],
                                         smooth_components=self._smooth_components,
                                         refer_constraints=self.constraints['refers'],
                                         weights=self._weights)

    def load_kernel(self,fname):
        '''load kernel matrix from file. Only the first row is needed.
        The file format follows numpy's savetxt.
        '''
        try:
            self.kernel0 = np.loadtxt(fname)
        except OSError:
            fname = pathlib.Path(self.data_dir)/pathlib.Path(fname)
            self.kernel0 = np.loadtxt(fname)
        self.kernel0 = np.array(self.kernel0).reshape(self.nz,self.ny,self.nx)
        self.kernel_op = AbicLSQOperator(self.kernel0,
                                         depth_constraint=self.constraints['depth'],
                                         smooth_components=self._smooth_components,
                                         refer_constraints=self.constraints['refers'],
                                         weights=self._weights)

    def _diagvec(self,vec=None,diag=None):
        if vec.ndim == 1:
            return vec * diag
        else:
            return  vec * diag.reshape(1,-1)

    @timeit
    def walsh_transform(self,keys=None):
        '''walsh transform of kernel matrix and constraint matrices.
        '''
        if keys is None:
            keys = ['kernel'] + list(self.constraints.keys()) + list(self._smooth_components)
        else:
            keys = keys

        if use_gpu > 0:
            import cupy as cp

        is_stored = dict()
        for key in keys:
            is_stored[key] = False
        if os.path.exists(self.fname):
            with h5py.File(self.fname,mode='r') as f:
                for key in keys:
                    try:
                        if '3' in f[key].keys():
                            is_stored[key] = True
                        if key == 'depth':
                            res = f['depth']['constraint'][:] - self.constraints['depth']
                            res = np.linalg.norm(res)/np.linalg.norm(self.constraints['depth'])
                            if res > 1.0e-3:
                                is_stored[key] = False
                        if key == 'kernel':
                            res = f['kernel']['source_volume'][:] - np.array(self.source_volume)
                            res = np.linalg.norm(res)/np.linalg.norm(np.array(self.source_volume))
                            if res > 1.0e-3:
                                is_stored[key] = False
                    except KeyError:
                        continue
        self._gen_walsh_matrix()
        logn = int(np.ceil(np.log2(self._nx*self._ny*self._nz)))
        norm_walsh = 1./(np.sqrt(2)**logn)
        blocks = ['0','1','2','3']
        matvec_op = {'kernel':self.kernel_op.gtoep.matvec,
                     'depth': lambda x: self._diagvec(x,diag=np.sqrt(self.constraints['depth']))
                 }
        for key in self._smooth_components:
            matvec_op[key] = lambda x, key=key: self.smop.derivation(x.reshape(-1,self.nz,self.ny,self.nx),component=key).reshape(x.shape[0],-1)
        is_stored['refers'] = True
        for key in keys:
            if is_stored[key]:
                print('walsh transformation of {} already exists.'.format(key))
                continue
            print('performing walsh transformation on {}.'.format(key))
            step = self.nx*self.ny*self.nz // 4
            if key == 'depth':
                step = self._nz
            with h5py.File(self.fname,mode='a') as f:
                try:
                    del f[key]
                except KeyError:
                    pass
                dxyz_group = f.create_group(key)
                walsh_group = f['walsh_matrix']
                for i in range(4):
                    print("\t progress {}/4".format(i))
                    part_walsh = walsh_group[blocks[i]][:]
                    if key == 'depth':
                        part_walsh = walsh_group[blocks[i]][:self._nz]
                    part_walsh = matvec_op[key](part_walsh)

                    if use_gpu > 0:
                        with cp.cuda.Device(self.gpu_id):
                            res = cp.zeros((step,step))
                            j = 0
                            while j*step < part_walsh.shape[1]:
                                tmp_block_gpu = cp.asarray(part_walsh[:,j*step:(j+1)*step])
                                res += tmp_block_gpu @ tmp_block_gpu.T
                                j += 1
                            res = cp.asnumpy(res)
                            if key in self._smooth_components:
                                res[np.abs(res)<1.0e-1*norm_walsh] = 0.
                            tmp_block_gpu = None
                            mempool = cp.get_default_memory_pool()
                            pinned_mempool = cp.get_default_pinned_memory_pool()
                            mempool.free_all_blocks()
                            pinned_mempool.free_all_blocks()
                    else:
                        res = np.zeros((step,step))
                        j = 0
                        while j*step < part_walsh.shape[1]:
                            tmp_block_gpu = np.asarray(part_walsh[:,j*step:(j+1)*step])
                            res += tmp_block_gpu @ tmp_block_gpu.T
                            j += 1
                        if key in self._smooth_components:
                            res[np.abs(res)<1.0e-1*norm_walsh] = 0.

                    dxyz_group.create_dataset(blocks[i],data=res)
        if ('depth' in keys) and (not is_stored['depth']):
            with h5py.File(self.fname,mode='a') as f:
                try:
                    del f['depth']['constraint']
                except KeyError:
                    pass
                dxyz_group = f['depth']
                dxyz_group.create_dataset('constraint',data=self.constraints['depth'])
        if ('kernel' in keys) and (not is_stored['kernel']):
            with h5py.File(self.fname,mode='a') as f:
                try:
                    del f['kernel']['source_volume']
                except KeyError:
                    pass
                dxyz_group = f['kernel']
                dxyz_group.create_dataset('source_volume',data=np.array(self._source_volume))

    @property
    def depth_constraint(self):
        '''Diagonal of the depth constraint matrix.
        One real number for each layer, stored in a vector.
        '''
        return(self.constraints['depth'].reshape(self._nz,-1)[:,0])
    @depth_constraint.setter
    def depth_constraint(self,value):
        self.constraints['depth'] = (value.reshape(-1,1)*np.ones((1,self._nx*self._ny))).ravel()

    @timeit
    def forward(self,model_density=None):
        ''' Calculate gravity field from model_density.
        Args:
            model_density (np.ndarray): density of each model cell, flattened from
            (nz,ny,nx) to (nz*ny*nx,).
        '''
        if model_density is None:
            model_density = self._model_density
        else:
            model_density = model_density.ravel()
        self.obs_data = self.kernel_op.gtoep.matvec(model_density)

    def _gen_rhs(self):
        '''generate right hand side of the least square equation of min_u.
        '''
        self.rhs = self._weights['obs']*self.kernel_op.gtoep.rmatvec(self.obs_data)
        if 'refers' in self._weights.keys():
            for i,refer_weight in enumerate(self._weights['refers']):
                v = self.constraints_val['refers'][i].ravel()
                if 'depth' in self._weights.keys():
                    v = self.constraints['depth']*v
                self.rhs += (refer_weight
                             *self.constraints['depth']
                             *v)
        if self.smooth_on == 'm-m0':
            for key in self._smooth_components:
                tmp2 = v.reshape(-1,self._nz,self._ny,self._nx)
                tmp2 = self.smop.derivation(tmp2,component=key)
                tmp2 = self.smop.rderivation(tmp2,component=key)
                if v.ndim == 1:
                    self.rhs += self._weights[key]*self._weights['depth']*self.constraints['depth']*tmp2.ravel()
                else:
                    self.rhs += self._weights[key]*self._weights['depth']*self.constraints['depth']*tmp2.reshape(v.shape[0],-1)
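    # The right-hand side assembled above corresponds (up to the exact placement of the
    # depth weighting W) to the normal equations of
    #     U(m) = w_obs*||G m - d||^2 + sum_k w_k*||D_k W (m - m0)||^2 + sum_i w_i*||W (m - m0_i)||^2,
    # i.e. rhs = w_obs*G^T d plus the depth-weighted reference and smoothness contributions
    # from m0. Note that the 'm-m0' branch reuses v, the last reference model processed in
    # the loop above, as the m0 entering the smoothness terms.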

    @timeit
    def do_linear_solve(self,tol=1.0e-5):
        ''' Solve the least-squares normal equations that minimize U.
        Args:
            tol (float): tolerance of the CG algorithm.
        '''
        self.do_linear_solve_quiet(tol=tol)

    def do_linear_solve_quiet(self,tol=1.0e-5):
        ''' Solve the least-squares normal equations that minimize U, with minimal console output.
        Args:
            tol (float): tolerance of the CG algorithm.
        '''
        self._gen_rhs()
        if self.subtract_mean:
            sum_obs = np.sum(self.obs_data)
            tmp_b = np.zeros(len(self.rhs)+1) # one extra entry carrying the sum of the observations
            tmp_b[:-1] = self.rhs
            tmp_b[-1] = sum_obs
            tmp_op = AbicLSQOperator2(self.kernel_op)
            self.solution = spsparse.linalg.cg(tmp_op,tmp_b,tol=tol)[0]
        else:
            self.solution = spsparse.linalg.cg(self.kernel_op,self.rhs,tol=tol)[0]
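    # When subtract_mean is set, the normal equations are augmented with one extra row whose
    # right-hand side is the sum of the observations, so the mean of the predicted data is
    # tied to the mean of the data; AbicLSQOperator2 is assumed to wrap kernel_op with the
    # matching augmented operator.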

    @timeit
    def calc_u(self,solved=False,x=None):
        '''Compute the value of U.
        Args:
            solved (bool): whether the least-squares equation has already been solved.
            x (array): use this x to evaluate U instead of solving for it.
        '''
        return self.calc_u_quiet(solved,x)

    @timeit
    def calc_min_u(self,solved=False,x=None):
        '''Another name for calc_u. We keep this function for backward compatibility.
        '''
        return self.calc_u_quiet(solved,x)

    def calc_u_quiet(self,solved=False,x=None):
        '''Compute the value of U with minimal console output.
        Args:
            solved (bool): whether the least-squares equation has already been solved.
            x (array): use this x to evaluate U instead of solving for it.
        '''
        if x is None:
            if not solved:
                self.do_linear_solve_quiet()
            x = self.solution
        self.min_u_val = self._weights['obs']*np.linalg.norm(self.kernel_op.gtoep.matvec(x) - self.obs_data)**2
        if ('refers' in self._weights.keys()) and (self.smooth_on == 'm-m0'):
            v = x - self.constraints_val['refer']
        else:
            v = x
        if 'depth' in self._weights.keys():
            v = self.constraints['depth']*v
        for key in self._smooth_components:
            tmp2 = self.smop.derivation(v.reshape(self._nz,self._ny,self._nx),
                                        component=key)
            self.min_u_val += self._weights[key]*np.linalg.norm(tmp2.ravel())**2
        if 'refers' in self._weights.keys():
            for i,refer_density in enumerate(self.constraints_val['refers']):
                v = x - refer_density
                if 'depth' in self._weights.keys():
                    v = self.constraints['depth']*v
                self.min_u_val += self._weights['refers'][i] *np.linalg.norm(v)**2
        return self.min_u_val

    def jac_u(self,x=None):
        ''' jacobian of the function U.
        '''
        res = self.kernel_op.matvec(x) - self.rhs
        return 2.*res

    def hessp_u(self,x,v):
        '''Hessian of the function U.
        '''
        res = self.kernel_op.matvec(v)
        return 2.*res

    @timeit
    def bound_optimize(self,x0=None):
        '''optimize function U using boundary constraint.
        '''
        density_bounds = Bounds(self.min_density,self.max_density)
        if x0 is None:
            x0 = np.zeros(self._nx*self._ny*self._nz)+(self.max_density + self.min_density)/2. # start at the midpoint of the bounds

        self.bound_solution = minimize(lambda x:self.calc_u_quiet(solved=True,x=x),
                                       x0,
                                       method='trust-constr',
                                       jac=self.jac_u,
                                       hessp=self.hessp_u,
                                       bounds=density_bounds,
                                       )
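    # Usage sketch (hypothetical bounds; min_density/max_density default to +/-1.0e4):
    #
    #     model.min_density, model.max_density = -500., 500.
    #     model.bound_optimize()
    #     density = model.bound_solution.x.reshape(model.nz, model.ny, model.nx)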

    def lasso_target(self,x):
        '''Lasso-style target: use plain (un-squared) norms of the misfit terms instead
           of squared 2-norms. This is the target function passed to the optimizer.
        '''
#        self.min_u_val = self._weights['obs']*np.linalg.norm(self.kernel_op.gtoep.matvec(x) - self.obs_data)**2
        self.min_u_val = self._weights['obs']*np.linalg.norm(self.kernel_op.gtoep.matvec(x) - self.obs_data)
        if ('refer' in self._weights.keys()) and (self.smooth_on == 'm-m0'):
            v = x - self.constraints_val['refer']
        else:
            v = x
        if 'depth' in self._weights.keys():
            v = np.sqrt(self._weights['depth'])*self.constraints['depth']*v
        for key in self._smooth_components:
            tmp2 = self.smop.derivation(v.reshape(self._nz,self._ny,self._nx),
                                        component=key)
            self.min_u_val += self._weights[key]*np.linalg.norm(tmp2.ravel())
        if 'refer' in self._weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = np.sqrt(self._weights['depth'])*self.constraints['depth']*v
            self.min_u_val += self._weights['refer'] *np.linalg.norm(v)
        return self.min_u_val
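    # Schematically, with W the depth weighting, the target above is
    #     w_obs*||G m - d|| + sum_k w_k*||D_k W m|| + w_refer*||W (m - m0)||,
    # and lasso_jac / lasso_hessp below supply the matching gradient and Hessian-vector
    # product for the trust-region solver.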

    def lasso_jac(self,x):
        '''jacobian of the lasso target function.
        '''
#        jac = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(x)) - self.kernel_op.gtoep.rmatvec(self.obs_data)
#        jac = 2.0*self._weights['obs']*jac
        jac = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(x)) - self.kernel_op.gtoep.rmatvec(self.obs_data)
        jac = self._weights['obs']*jac
        norm_res = np.linalg.norm(self.obs_data - self.kernel_op.gtoep.matvec(x))
        jac = jac/norm_res
        if 'refer' in self.weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = self.constraints['depth']*v
            norm_refer = np.linalg.norm(v)
            jac += self.weights['refer']*self.weights['depth']*self.constraints['depth']*v/norm_refer

        for key in self.smooth_components:
            v = x
            if 'depth' in self._weights.keys():
                v = self.constraints['depth']*v
            tmp2 = self.smop.derivation(v.reshape(self._nz,self._ny,self._nx),
                                        component=key)
            smooth_norm = np.linalg.norm(tmp2.ravel())
            tmp2 = self.smop.rderivation(tmp2,component=key)
            jac += self.weights[key]*self.weights['depth']*self.constraints['depth']*tmp2.ravel()/smooth_norm
        return jac

    def lasso_hessp(self,x,v):
        '''hessian of the lasso target function.
        '''
#        res = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(v))
#        res = 2.0*self._weights['obs']*res
        norm_res = np.linalg.norm(self.obs_data - self.kernel_op.gtoep.matvec(x))
        res = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(v))/norm_res
        gradient_res = (self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(x))
                        - self.kernel_op.gtoep.rmatvec(self.obs_data))
        res -= np.dot(gradient_res,v)/norm_res**3 * gradient_res
        res *= self._weights['obs']
        if 'refer' in self.weights.keys():
            v2 = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v2 = self.constraints['depth']*v2
            norm_refer = np.linalg.norm(v2)
            res += self.weights['refer']*self.weights['depth']/norm_refer*self.constraints['depth']*v
            grad_ref = self.constraints['depth']*v2
            res -= (self.weights['refer']*self.weights['depth']
                    *(np.dot(v,grad_ref)/norm_refer**3*grad_ref))
        for key in self.smooth_components:
            v2 = x
            if 'depth' in self._weights.keys():
                v2 = self.constraints['depth']*v2
            tmp2 = self.smop.derivation(v2.reshape(self._nz,self._ny,self._nx),
                                        component=key)
            smooth_norm = np.linalg.norm(tmp2.ravel())
            tmp2 = self.smop.rderivation(tmp2,component=key)
            grad_sm = self.constraints['depth']*tmp2.ravel()

            tmp2 = v.reshape(self.nz,self.ny,self.nx)
            tmp2 = self.smop.derivation(tmp2,component=key)
            tmp2 = self.smop.rderivation(tmp2,component=key)
            tmp2 = self.constraints['depth']*tmp2.ravel()
            res += (self._weights['depth']*self._weights[key]/smooth_norm
                    *(tmp2 - np.dot(v,grad_sm)*grad_sm/smooth_norm**2))
        return res


    @timeit
    def lasso_optimize(self,x0=None):
        '''optimize the lasso function.
        '''
        density_bounds = Bounds(self.min_density,self.max_density)
        if x0 is None:
            x0 = (np.random.rand(self._nx*self._ny*self._nz)
                  *(self.max_density - self.min_density)/2.
                  +(self.max_density + self.min_density)/2.)
        self.bound_solution = minimize(self.lasso_target,
                                       x0,
                                       method='trust-constr',
                                       jac=self.lasso_jac,
                                       hessp=self.lasso_hessp,
                                       bounds=density_bounds
                                       )

    def calc_res(self):
        '''Calculate residual information, including residual norms and standard deviations.
        The results are stored in ``self.residuals`` and ``self.stds``.
        '''
        self.residuals = dict()
        self.stds = dict()
        self.residuals['obs'] = np.linalg.norm(self.kernel_op.gtoep.matvec(self.solution)-self.obs_data)**2
        self.stds['obs'] = np.std(self.kernel_op.gtoep.matvec(self.solution)-self.obs_data)
        for key in self._smooth_components:
            tmp2 = self.solution.reshape(self._nz,self._ny,self._nx)
            if ('refers' in self.constraints_val.keys()) and (self.smooth_on == 'm-m0'):
                tmp2 -= self.constraints_val['refer'].reshape(self._nz,self._ny,self._nx)
            tmp2 = self.smop.derivation(tmp2,component=key)
            self.residuals[key] = np.linalg.norm(tmp2.ravel())**2
            self.stds[key] = np.std(tmp2.ravel())
        if 'refers' in self.constraints_val.keys():
            self.residuals['refers'] = []
            self.stds['refers'] = []
            for i,refer_density in enumerate(self.constraints_val['refers']):
                self.residuals['refers'].append(np.linalg.norm(self.solution.ravel()-refer_density.ravel())**2)
                self.stds['refers'].append(np.std(self.solution.ravel()-refer_density.ravel()))

    def calc_log_prior_total_det_naive(self):
        '''Calculate the log determinants of the prior and joint (posterior) distributions
        using dense SVD (naive mode), with minimal console output.
        '''
        self.log_prior_det_val = 0
        self.log_total_det_val = 0
        prior_eigs = np.zeros(self._nx*self._ny*self._nz)
        total_eigs = np.zeros(self._nx*self._ny*self._nz)
        tmp_mat = np.zeros((self._nz*self._ny*self._nx,self._nz*self._ny*self._nx))
        for dxyz_name in self._smooth_components:
            tmp_mat += self._weights[dxyz_name]*self.matrices[dxyz_name]
        if 'depth' in self._weights.keys():
            tmp_mat = self.constraints['depth'].reshape(-1,1)*self.constraints['depth'].reshape(1,-1)*tmp_mat
        prior_eigs = np.linalg.svd(tmp_mat,compute_uv=False)
        eps = prior_eigs.max() * len(prior_eigs) * np.finfo(np.float64).eps
        eigs = prior_eigs[prior_eigs>eps]
        self.log_prior_det_val = np.sum(np.log(eigs))

        tmp_mat += self._weights['obs']*self.matrices['obs']
        if 'refers' in self._weights.keys():
            tmp_mat += sum(self._weights['refers'])*np.diag(self.constraints['depth'])**2
        uu,total_eigs,vv = np.linalg.svd(tmp_mat,compute_uv=True)
        self.log_total_det_val = np.sum(np.log(total_eigs))
        self._gen_rhs()
        self.solution = np.zeros(np.prod(self.nzyx))
        self.solution = (vv.T @ ((1./total_eigs).ravel() * (uu.T @ self.rhs)))

        self.eigs = {'prior':prior_eigs,'total':total_eigs}
        return self.log_prior_det_val,self.log_total_det_val
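    # The prior matrix is rank deficient (the difference operators annihilate constant
    # models), so the prior log-determinant above is a log pseudo-determinant: only
    # singular values above the numerical threshold eps contribute.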

    def calc_log_prior_total_det_walsh(self):
        '''Calculate the log determinants of the prior and joint (posterior) distributions
        block by block in the Walsh-transformed domain, with minimal console output.
        '''
        self.log_prior_det_val = 0
        self.log_total_det_val = 0
        blocks = ['0','1','2','3']
        prior_eigs = np.zeros(self._nx*self._ny*self._nz)
        total_eigs = np.zeros(self._nx*self._ny*self._nz)
        step = self._nx*self._ny*self._nz//4
        try:
            depth_weight = self._weights['depth']
        except KeyError:
            depth_weight = 1.
        with h5py.File(self.fname,mode='r') as f:
            if 'depth' in self._weights.keys():
                depth_walsh = f['depth']['0'][:]
            self._gen_rhs()
            self.solution = np.zeros(np.prod(self.nzyx))
            for i_b,block in enumerate(blocks):
                tmp_block = np.zeros((step,step))
                walsh_group = f['walsh_matrix']
                part_walsh = walsh_group[blocks[i_b]][:]
                for dxyz_name in self._smooth_components:
                    try:
                        dxyz_walsh = f[dxyz_name][block][:].reshape(step//self._nz,
                                                                    self._nz,
                                                                    step//self._nz,
                                                                    self._nz)
                        ein_path = np.einsum_path('mi,xiyj,jn->xmyn',
                                                  depth_walsh.T,
                                                  dxyz_walsh,
                                                  depth_walsh,
                                                  optimize='optimal')[0]
                        tmp_multi = np.einsum('mi,xiyj,jn->xmyn',
                                              depth_walsh.T,
                                              dxyz_walsh,
                                              depth_walsh,
                                              optimize=ein_path)
                        tmp_block += depth_weight*self._weights[dxyz_name]*tmp_multi.reshape(step,step)
                    except KeyError:
                        pass

                if use_gpu > 0:
                    import cupy as cp
                    with cp.cuda.Device(self.gpu_id):
                        tmp_block_gpu = cp.asarray(tmp_block,dtype=np.float64)
                        eigs = cp.linalg.svd(tmp_block_gpu,compute_uv=False)
                        prior_eigs[i_b*step:(i_b+1)*step] = cp.asnumpy(eigs)
                        eps = eigs.max() * len(eigs) * np.finfo(np.float64).eps
                        eigs = eigs[eigs>eps]
                        self.log_prior_det_val += cp.asnumpy(cp.sum(cp.log(eigs)))
                        tmp_block_gpu = None
                        eigs = None
                        free_gpu()
                    tmp_block += self._weights['obs']*f['kernel'][block][:]
                    if 'refers' in self._weights.keys():
                        tmp_multi_small = depth_walsh.T@depth_walsh
                        for i in range(step//self._nz):
                            tmp_block[i*self._nz:(i+1)*self._nz,
                                      i*self._nz:(i+1)*self._nz] += sum(self._weights['refers'])*tmp_multi_small
                    with cp.cuda.Device(self.gpu_id):
                        tmp_block_gpu = cp.asarray(tmp_block,dtype=np.float64)
                        eigs = cp.linalg.svd(tmp_block_gpu,compute_uv=False)
                        total_eigs[i_b*step:(i_b+1)*step] = cp.asnumpy(eigs)
                        #eigs = eigs[eigs>1.0e-12]
                        self.log_total_det_val += cp.asnumpy(cp.sum(cp.log(eigs)))
                        tmp_block_gpu = None
                        eigs = None
                        free_gpu()
                else:
                    tmp_block_gpu = np.asarray(tmp_block,dtype=np.float64)
                    eigs = np.linalg.svd(tmp_block_gpu,compute_uv=False)
                    prior_eigs[i_b*step:(i_b+1)*step] = eigs
                    eps = eigs.max() * len(eigs) * np.finfo(np.float64).eps
                    eigs = eigs[eigs>eps]
                    self.log_prior_det_val += np.sum(np.log(eigs))
                    tmp_block_gpu = None
                    eigs = None
                    tmp_block += self._weights['obs']*f['kernel'][block][:]
                    if 'refers' in self._weights.keys():
                        tmp_multi_small = depth_walsh.T@depth_walsh
                        #tmp_multi_small = np.eye(tmp_multi_small.shape[0])
                        for i in range(step//self._nz):
                            tmp_block[i*self._nz:(i+1)*self._nz,
                                      i*self._nz:(i+1)*self._nz] += sum(self._weights['refers'])*tmp_multi_small
                    tmp_block_gpu = np.asarray(tmp_block,dtype=np.float64)
                    uu,eigs,vv = np.linalg.svd(tmp_block_gpu,compute_uv=True)
                    total_eigs[i_b*step:(i_b+1)*step] = eigs
                    #eigs = eigs[eigs>1.0e-12]
                    self.log_total_det_val += np.sum(np.log(eigs))
                    tmp_block_gpu = None
                    self.solution += part_walsh.T @ (vv.T @ ((1./eigs).ravel() * (uu.T @ (part_walsh @ self.rhs))))
                    eigs = None


        if use_gpu > 0:
            self.log_prior_det_val = cp.asnumpy(self.log_prior_det_val)
            self.log_total_det_val = cp.asnumpy(self.log_total_det_val)

        self.eigs = {'prior':prior_eigs,'total':total_eigs}
        return self.log_prior_det_val,self.log_total_det_val

    def calc_log_prior_total_det_quiet(self,mode=None):
        '''Calculate the log determinants of the prior and joint distributions with minimal
        console output, dispatching on ``mode`` ('walsh' or 'naive').
        '''
        if mode is None:
            mode = self.mode
        if mode == 'walsh':
            return self.calc_log_prior_total_det_walsh()
        elif mode == 'naive':
            return self.calc_log_prior_total_det_naive()
        else:
            raise ValueError('mode={} is not implemented!!'.format(mode))

    def prepare_det(self,mode=None):
        if mode is None:
            mode = self.mode
        if mode == 'walsh':
            self.walsh_transform()
        elif mode == 'naive':
            self.matrices = dict()
            n_cell = self._nx*self._ny*self._nz
            self.matrices['G']  = self.kernel_op.gtoep.matvec(np.eye(self._nx*self._ny*self._nz)).T
            self.matrices['obs'] = self.kernel_op.gtoep.rmatvec(self.kernel_op.gtoep.matvec(np.eye(n_cell)))
            for key in self._smooth_components:
                self.matrices[key] = self.smop.rderivation(self.smop.derivation(np.eye(n_cell).reshape(-1,self._nz,self._ny,self._nx),key),key).reshape(n_cell,n_cell)
        else:
            raise ValueError('mode={} is not implemented!!'.format(mode))

    @timeit
    def calc_log_prior_total_det(self,mode=None):
        '''calculate the determinant of prior distribution and joint distribution.
        '''
        return self.calc_log_prior_total_det_quiet(mode=mode)

    def calc_log_obs_det_quiet(self):
        '''Calculate the log determinant of the observation distribution with minimal
        console output.
        '''
        self.log_obs_det_val = (4*sum(np.log(self.constraints['depth']))
                                +np.prod(self.nzyx)*sum(np.log(w) for w in self.weights['refers'])
                                +np.log(self._weights['obs'])*len(self.obs_data))
        return self.log_obs_det_val

    @timeit
    def calc_log_obs_det(self):
        '''Calculate the log determinant of the observation distribution.
        '''
        return self.calc_log_obs_det_quiet()

    @timeit
    def calc_abic(self,mode=None):
        '''calculate abic value: -log_prior_det_value+log_total_det-log_obs_det+min_u'''
        self.calc_log_obs_det()
        self.calc_log_prior_total_det(mode=mode)
        self.calc_u(solved=True)
        self.abic_val = (self.log_total_det_val
                         + self.min_u_val
                         - self.log_prior_det_val
                         - self.log_obs_det_val)
        return self.abic_val

    def calc_abic_quiet(self,mode=None):
        '''calculate abic value: -log_prior_det_value+log_total_det-log_obs_det+min_u'''
        self.calc_log_obs_det_quiet()
        self.calc_log_prior_total_det_quiet(mode=mode)
        self.calc_u_quiet(solved=True)
        self.abic_val = (self.log_total_det_val
                         + self.min_u_val
                         - self.log_prior_det_val
                         - self.log_obs_det_val)
        self.optimize_log['parameters'].append(self.weights)
        self.optimize_log['abic_vals'].append(self.abic_val)
        return self.abic_val
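    # Typical ABIC workflow (illustrative call sequence; method names are assumed to be
    # available on this class and 'model' denotes an instance):
    #
    #     model.gen_kernel()
    #     model.forward()                    # or assign measured data to model.obs_data
    #     model.prepare_det(mode='naive')    # precompute matrices for the determinants
    #     model.calc_abic(mode='naive')
    #     model.print_summary()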

    def _abic_optimize_exp(self,mode=None,**opt_args):
        '''optimize the abic value. Use the log and exp trick to constrain weights
        to be positive.
        '''
        #optimize_keys = list(set(self._weights.keys())-set(['depth']))
        if self.confs['optimize_keys'] is None:
            optimize_keys = list(self._weights.keys())
        else:
            optimize_keys = self.confs['optimize_keys']
        optimize_keys_1 = list(set(optimize_keys)-set(['refers']))
        def abic_target(x):
            tmp_weights = dict()
            for i,key in enumerate(optimize_keys_1):
                tmp_weights[key] = np.exp(x[i])
            if 'refers' in optimize_keys:
                tmp_weights['refers'] = list(np.exp(x[-len(self.weights['refers']):]))
            self.weights = {**self.weights,**tmp_weights}
            self.calc_abic_quiet(mode=mode)
            return self.abic_val
        n_weights = len(optimize_keys)
        if 'refers' in optimize_keys:
            n_weights += len(self.weights['refers']) - 1
        x0 = np.zeros(n_weights)
        for i,key in enumerate(optimize_keys_1):
            x0[i] = np.log(self._weights[key])
        if 'refers' in optimize_keys:
            for i,refer_weight in enumerate(self._weights['refers']):
                x0[len(optimize_keys_1)+i] = np.log(refer_weight)
        self.abic_optimize_summary = minimize(abic_target,
                                              x0,
                                              **opt_args)
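    # The optimisation runs over x = log(weight): exponentiating inside abic_target keeps
    # every weight strictly positive without explicit bound constraints. For example, a
    # starting weight of 1.0 maps to x0 = 0, and a step of +-1 in x rescales the weight
    # by a factor of e.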

    def _abic_optimize_bound(self):
        '''optimize the abic value. Use scipy's COBYLA algorithm to constrain weights
        to be positive.
        '''
        if self.confs['optimize_keys'] is None:
            optimize_keys = list(self._weights.keys())
        else:
            optimize_keys = self.confs['optimize_keys']
        def abic_target(x):
            tmp_weights = dict()
            for i,key in enumerate(optimize_keys):
                tmp_weights[key] = np.exp(x[i])
            self.weights = {**self.weights,**tmp_weights}
            return self.calc_abic_quiet()
        x0 = np.zeros(len(optimize_keys))
        for i,key in enumerate(optimize_keys):
            x0[i] = self._weights[key]
        weight_constraint = LinearConstraint(np.eye(len(optimize_keys)),0.,np.inf)
        self.abic_optimize_summary = minimize(abic_target,
                                              x0,
                                              method='COBYLA',
                                              constraints=weight_constraint)

    @timeit
    def abic_optimize(self,mode=None,**opt_args):
        '''optimize the abic value. This is the interface for users.
        '''
        self._abic_optimize_exp(mode=mode,**opt_args)

    @timeit
    def para_grad(self,x):
        pass

    def u_bound(self):
        pass

    def print_summary(self):
        print('abic values:{}'.format(self.abic_val))
        print('log total det:{}'.format(self.log_total_det_val))
        print('log prior det:{}'.format(self.log_prior_det_val))
        print('log obs det:{}'.format(self.log_obs_det_val))
        print('min u:{}'.format(self.min_u_val))
        print('std:',end=' ')
        print(self.stds)
        print('1/var:',end=' ')
        print({k:1./v**2 for k,v in self.stds.items()})
        print('norms:',end=' ')
        print(self.residuals)
Code example #7
class GravInvAbicModel:
    def __init__(self,
                 nzyx=[4,4,4],
                 smooth_components=['dx','dy','dz'],
                 depth_constraint=None,
                 model_density=None,
                 refer_density=None,
                 weights=None,
                 source_volume=None,
                 smooth_on='m',
                 data_dir='/data/gravity_inversion'):
        self._nz,self._ny,self._nx = nzyx
        self.smooth_on = smooth_on
        self.dxyz_shapes = {'dx':(self._nz,self._ny,self._nx),
                  'dy':(self._nz,self._nx*self._ny),
                  'dz':(self._nx*self._ny*self._nz,)}
        self.dxyz_spaces = {'dx':self._nx-1,
                  'dy':self._nx*(self._ny-1),
                  'dz':self._nx*self._ny*(self._nz-1)}
        self.data_dir = data_dir
        self.gen_model_name()
        self.nobsx = nzyx[2]
        self.nobsy = nzyx[1]
        self.source_volume = source_volume
        if model_density is None:
            self._model_density = None
        else:
            self._model_density = model_density.ravel()
        self._smooth_components = smooth_components
        self.constraints = dict()
        self.constraints_val = dict()
        if depth_constraint is None:
            self.constraints['depth'] = np.ones(np.prod(nzyx))
            self.constraints_val['depth'] = None
        else:
            self.constraints['depth'] = (depth_constraint.reshape(-1,1)*np.ones((1,self._nx*self._ny))).ravel()
            self.constraints_val['depth'] = 0
        if refer_density is None:
            self.constraints['refer'] = None
            self.constraints_val['refer'] = None
        else:
            self.constraints['refer'] = np.ones(self._nx*self._ny*self._nz)
            self.constraints_val['refer'] = refer_density.ravel()
        self._weights = weights
        if not 'depth' in self._weights.keys():
            self._weights['depth'] = 1.0
        self._gen_dxyz_constraint()
        self.kernel_op = None
        self.abic_val = 0
        self.log_total_det_val = 0
        self.log_prior_det_val = 0
        self.log_obs_det_val = 0
        self.min_u_val = 0
        self.min_density = -1.0e4
        self.max_density = 1.0e4

    @property
    def source_volume(self):
        return self._source_volume
    @source_volume.setter
    def source_volume(self,value):
        self._source_volume = value
        self.gen_mesh()

    def gen_model_name(self):
        self.model_name = '{}x{}x{}'.format(self._nx,self._ny,self._nz)
        self.fname = pathlib.Path(self.data_dir)/pathlib.Path(self.model_name+'.h5')

    @property
    def weights(self):
        return self._weights
    @weights.setter
    def weights(self,values):
        self._weights = values
        if not self.kernel_op is None:
            self.kernel_op.weights = self._weights

    @property
    def smooth_components(self):
        return self._smooth_components
    @smooth_components.setter
    def smooth_components(self,values):
        self._smooth_components = values
        self._gen_dxyz_constraint()
        if not self.kernel_op is None:
            self.kernel_op.dxyz_constraint = self.dxyz_constraint

    @timeit
    def _gen_dxyz_constraint(self):
        '''First generate the multi-level circulant matrix of which each difference constraint is a part,
        then compute its eigenvalues with an FFT. self.dxyz_constraint stores the eigenvalues; when
        multiplying with a vector, the wrap-around elements have to be discarded.'''
        self.dxyz_constraint = dict()
        for component in self._smooth_components:
            tmp = np.zeros(self.nx*self.ny*self.nz)
            tmp[0] = 1
            tmp[self.dxyz_spaces[component]] = -1
            tmp = tmp.reshape(self.dxyz_shapes[component])
            self.dxyz_constraint[component] = np.fft.fftn(tmp)
            self.constraints[component] = self.dxyz_constraint[component]
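    # A (multi-level) circulant matrix is diagonalised by the FFT, so each smoothness
    # constraint D_k is applied as an element-wise product with dxyz_constraint[k] in the
    # Fourier domain; the wrap-around entries beyond dxyz_spaces[k] are not valid
    # differences and are discarded after the inverse transform (see _gen_rhs and calc_min_u).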

    @property
    def refer_density(self):
        return self.constraints_val['refer'].reshape(self._nz,self._ny,self._nx)
    @refer_density.setter
    def refer_density(self,value):
        self.constraints_val['refer'] = value.ravel()

    @property
    def nx(self):
        return self._nx
    @nx.setter
    def nx(self,value):
        self._nx = value
        self.nobsx = self._nx
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = (self.constraints['depth'].reshape(self._nz,-1)[:,0].reshape(-1,1)
                                         *np.ones((1,self._nx*self._ny)))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refer'] = np.ones(self._nx*self._ny*self._nz)

    @property
    def ny(self):
        return self._ny
    @ny.setter
    def ny(self,value):
        self._ny = value
        self.nobsy = self._ny
        self.gen_model_name()
        if not self.constraints['depth'] is None:
            self.constraints['depth'] = (self.constraints['depth'].reshape(self._nz,-1)[:,0].reshape(-1,1)
                                         *np.ones((1,self._nx*self._ny)))
            self.constraints['depth'] = self.constraints['depth'].ravel()
        self.constraints['refer'] = np.ones(self._nx*self._ny*self._nz)

    @property
    def nz(self):
        return self._nz
    @nz.setter
    def nz(self,value):
        self._nz = value
        self.gen_model_name()
        self.constraints['refer'] = np.ones(self._nx*self._ny*self._nz)
        print("Warning: nz changed. \nDon't forget setting depth constraints.")

    @property
    def model_density(self):
        return(self._model_density.reshape(self.nz,self.ny,self.nx))
    @model_density.setter
    def model_density(self,value):
        self._model_density = value.ravel()

    def gen_mesh(self,height = -1):
        shape = (self._nz, self._ny, self._nx)
        self.mesh = PrismMesh(self._source_volume, shape)
        density = np.ones(shape)*1.0e3
        self.mesh.addprop('density', density.ravel())
        # generate obs grid
        # coordinate: x North-South,y East-West
        # gridder is in the order: (nx,ny)
        self.obs_area = (self._source_volume[0]+0.5*self.mesh.dims[0],
                         self._source_volume[1]-0.5*self.mesh.dims[0],
                         self._source_volume[2]+0.5*self.mesh.dims[1],
                         self._source_volume[3]-0.5*self.mesh.dims[1])
        obs_shape = (self.nobsx, self.nobsy)
        self.xp, self.yp, self.zp = gridder.regular(self.obs_area, obs_shape, z=height)

    def _gen_walsh_matrix(self):
        print('generating walsh_matrix')
        if os.path.exists(self.fname):
            with h5py.File(self.fname,mode='r') as f:
                if not 'walsh_matrix' in f.keys():
                    have_walsh_matrix = False
                else:
                    have_walsh_matrix = True
        else:
            have_walsh_matrix = False
        if have_walsh_matrix:
            return
        walsh_matrix = walsh.walsh_matrix(self._nx*self._ny*self._nz,
                                          normalized=True,
                                          ordering='sequence2',
                                          nxyz=(self._nx,self._ny,self._nz))
        walsh_matrix = walsh_matrix.astype(np.float32)
        step = self._nx*self._ny*self._nz//4
        components = ['0','1','2','3']
        with h5py.File(self.fname,mode='a') as f:
            fgroup = f.create_group('walsh_matrix')
            for i in range(4):
                fgroup.create_dataset(components[i],data=walsh_matrix[i*step:(i+1)*step,:])

    def gen_kernel(self):
        def calc_kernel(i):
            return prism.gz(self.xp[0:1],self.yp[0:1],self.zp[0:1],[self.mesh[i]])
        with Pool(processes=16) as pool:
            kernel0 = pool.map(calc_kernel,range(len(self.mesh)))
        self.kernel0 = np.array(kernel0).reshape(self.nz,self.ny,self.nx)
        self.kernel_op = AbicLSQOperator(self.kernel0,
                                         depth_constraint=self.constraints['depth'],
                                         dxyz_constraint=self.dxyz_constraint,
                                         refer_constraint=self.constraints['refer'],
                                         weights=self._weights)

    def _dxyzvec(self,vec=None,key=None):
        res = vec.reshape(-1,*self.dxyz_shapes[key])
        axes = np.arange(1,res.ndim)
        res = np.fft.ifftn(np.fft.fftn(res,axes=axes)*self.dxyz_constraint[key],axes=axes).real
        slices = [slice(None)]*res.ndim
        slices[-1] = slice(0,self.dxyz_spaces[key])
        if vec.ndim == 1:
            return res[tuple(slices)].ravel()
        else:
            return res[tuple(slices)].reshape(vec.shape[0],-1)

    def _diagvec(self,vec=None,diag=None):
        if vec.ndim == 1:
            return vec * diag
        else:
            return  vec * diag.reshape(1,-1)

    @timeit
    def walsh_transform(self,keys=None):
        if keys is None:
            keys = ['kernel'] + list(self.constraints.keys())
        else:
            keys = keys
        is_stored = dict()
        for key in keys:
            is_stored[key] = False
        if os.path.exists(self.fname):
            with h5py.File(self.fname,mode='r') as f:
                for key in keys:
                    try:
                        if '3' in f[key].keys():
                            is_stored[key] = True
                        if key == 'depth':
                            res = f['depth']['constraint'][:] - self.constraints['depth']
                            res = np.linalg.norm(res)/np.linalg.norm(self.constraints['depth'])
                            if res > 1.0e-3:
                                is_stored[key] = False
                    except KeyError:
                        continue
        self._gen_walsh_matrix()
        logn = int(np.ceil(np.log2(self._nx*self._ny*self._nz)))
        norm_walsh = 1./(np.sqrt(2)**logn)
        blocks = ['0','1','2','3']
        matvec_op = {'kernel':self.kernel_op.gtoep.matvec,
                  'dx': lambda x: self._dxyzvec(x,key='dx'),
                  'dy': lambda x: self._dxyzvec(x,key='dy'),
                  'dz': lambda x: self._dxyzvec(x,key='dz'),
                  'refer': lambda x: self._diagvec(x,diag=self.constraints['refer']),
                  'depth': lambda x: self._diagvec(x,diag=np.sqrt(self.constraints['depth']))
                 }
        is_stored['refer'] = True
        for key in keys:
            if is_stored[key]:
                print('walsh transformation of {} already exists.'.format(key))
                continue
            print('performing walsh transformation on {}.'.format(key))
            step = self.nx*self.ny*self.nz // 4
            if key == 'depth':
                step = self._nz
            with h5py.File(self.fname,mode='a') as f:
                try:
                    del f[key]
                except KeyError:
                    pass
                dxyz_group = f.create_group(key)
                walsh_group = f['walsh_matrix']
                for i in range(4):
                    print("\t progress {}/4".format(i))
                    part_walsh = walsh_group[blocks[i]][:]
                    if key == 'depth':
                        part_walsh = walsh_group[blocks[i]][:self._nz]
                    part_walsh = matvec_op[key](part_walsh)
                    with cp.cuda.Device(2):
                        res = cp.zeros((step,step))
                        j = 0
                        while j*step < part_walsh.shape[1]:
                            tmp_block_gpu = cp.asarray(part_walsh[:,j*step:(j+1)*step])
                            res += tmp_block_gpu @ tmp_block_gpu.T
                            j += 1
                        res = cp.asnumpy(res)
                        if key in self._smooth_components:
                            res[np.abs(res)<1.0e-1*norm_walsh] = 0.
                        tmp_block_gpu = None
                        mempool = cp.get_default_memory_pool()
                        pinned_mempool = cp.get_default_pinned_memory_pool()
                        mempool.free_all_blocks()
                        pinned_mempool.free_all_blocks()
                    dxyz_group.create_dataset(blocks[i],data=res)
        if ('depth' in keys) and (not is_stored['depth']):
            with h5py.File(self.fname,mode='a') as f:
                try:
                    del f['depth_constraint']
                except KeyError:
                    pass
                dxyz_group = f['depth']
                dxyz_group.create_dataset('constraint',data=self.constraints['depth'])

    @property
    def depth_constraint(self):
        return(self.constraints['depth'].reshape(self._nz,-1)[:,0])
    @depth_constraint.setter
    def depth_constraint(self,value):
        self.constraints['depth'] = (value.reshape(-1,1)*np.ones((1,self._nx*self._ny))).ravel()

    @timeit
    def forward(self,model_density=None):
        if model_density is None:
            model_density = self._model_density
        else:
            model_density = model_density.ravel()
        self.obs_data = self.kernel_op.gtoep.matvec(model_density)

    def _gen_rhs(self):
        self.rhs = self._weights['obs']*self.kernel_op.gtoep.rmatvec(self.obs_data)
        if 'depth' in self._weights.keys():
            v = self.constraints['depth']*self.constraints_val['refer']
        if 'refer' in self._weights.keys():
            self.rhs += (self._weights['refer']
                         *self._weights['depth']
                         *self.constraints['depth']
                         *v)
        if self.smooth_on == 'm-m0':
            if not self.dxyz_constraint is None:
                for key,constraint in self.dxyz_constraint.items():
                    if not key in self._weights.keys():
                        continue
                    tmp2 = v.reshape(-1,*constraint.shape)
                    fft_comp = list(range(tmp2.ndim))[1:]
                    tmp2 = np.fft.ifftn(np.fft.fftn(tmp2,axes=fft_comp)*constraint,axes=fft_comp).real
                    slices = [slice(None)]*tmp2.ndim
                    slices[-1] = slice(self.dxyz_spaces[key],None)
                    tmp2[tuple(slices)] = 0
                    tmp2 = np.real(np.fft.ifftn(np.fft.fftn(tmp2,axes=fft_comp)*np.conj(constraint),axes=fft_comp))
                    if v.ndim == 1:
                        self.rhs += self._weights[key]*self._weights['depth']*self.constraints['depth']*tmp2.ravel()
                    else:
                        self.rhs += self._weights[key]*self._weights['depth']*self.constraints['depth']*tmp2.reshape(v.shape[0],-1)

    @timeit
    def do_linear_solve(self):
        self._gen_rhs()
        self.solution = spsparse.linalg.cg(self.kernel_op,self.rhs,tol=1.0e-5)[0]

    @timeit
    def calc_min_u(self,solved=False,x=None):
        if x is None:
            if not solved:
                self.do_linear_solve()
            x = self.solution
        self.min_u_val = self._weights['obs']*np.linalg.norm(self.kernel_op.gtoep.matvec(x) - self.obs_data)**2
        if ('refer' in self._weights.keys()) and (self.smooth_on == 'm-m0'):
            v = x - self.constraints_val['refer']
        else:
            v = x
        if 'depth' in self._weights.keys():
            v = np.sqrt(self._weights['depth'])*self.constraints['depth']*v
        if not self.dxyz_constraint is None:
            for key,constraint in self.dxyz_constraint.items():
                if not key in self._weights.keys():
                    continue
                tmp2 = np.fft.ifftn(
                         np.fft.fftn(v.reshape(constraint.shape))*constraint
                       ).real
                slices = [slice(None)]*constraint.ndim
                slices[-1] = slice(0,self.dxyz_spaces[key])
                self.min_u_val += self._weights[key]*np.linalg.norm(tmp2[tuple(slices)].ravel())**2
        if 'refer' in self._weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = np.sqrt(self._weights['depth'])*self.constraints['depth']*v
            self.min_u_val += self._weights['refer'] *np.linalg.norm(v)**2
        return self.min_u_val

    def bound_constraint_u(self,x=None):
        self.calc_min_u(x=x,solved=True)
        log_barrier = np.sum(np.log(x-self.min_density) + np.log(self.max_density-x))
        return self.min_u_val - 2.*self._weights['bound']*log_barrier
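    # bound_constraint_u adds a logarithmic barrier, -2*w_bound*sum(log(x-min)+log(max-x)),
    # to U so that the Newton-CG search in bound_optimize stays strictly inside
    # [min_density, max_density]; bound_jac_u and bound_hessp_u include the matching
    # barrier terms in the gradient and Hessian-vector product.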

    def bound_jac_u(self,x=None):
        res = 0.
        res += self._weights['obs']*(self.kernel_op.gtoep.matvec(x) - self.obs_data)
        if ('refer' in self._weights.keys()) and (self.smooth_on == 'm-m0'):
            v = x - self.constraints_val['refer']
        else:
            v = x
        if 'depth' in self._weights.keys():
            v = self._weights['depth']*self.constraints['depth']*v
        if not self.dxyz_constraint is None:
            for key,constraint in self.dxyz_constraint.items():
                if not key in self._weights.keys():
                    continue
                tmp2 = np.fft.ifftn(
                         np.fft.fftn(v.reshape(constraint.shape))*constraint
                       ).real
                slices = [slice(None)]*constraint.ndim
                slices[-1] = slice(0,self.dxyz_spaces[key])
                res += self._weights[key]*tmp2[tuple(slices)].ravel()
        if 'refer' in self._weights.keys():
            v = x - self.constraints_val['refer']
            if 'depth' in self._weights.keys():
                v = self._weights['depth']*self.constraints['depth']*v
            res += self._weights['refer'] *v
        res += self._weights['bound']*(1./(self.max_density-x) - 1./(x-self.min_density))
        return 2.*res

    def bound_hessp_u(self,x,v):
        res = self.kernel_op.matvec(v)
        hess_diag = 1./(self.max_density-x)**2 + 1./(x-self.min_density)**2
        res += self._weights['bound']*hess_diag*v
        return 2.*res

    def bound_optimize(self,x0=None):
        if x0 is None:
            if 'refer' in self._weights.keys():
                x0 = self.constraints_val['refer']
            else:
                x0 = np.zeros(self._nx*self._ny*self._nz)
        self.solution = minimize(self.bound_constraint_u,
                       x0,
                       method='Newton-CG',
                       jac=self.bound_jac_u,
                       hessp=self.bound_hessp_u)

    def calc_res(self):
        self.residuals = dict()
        self.stds = dict()
        self.residuals['obs'] = np.linalg.norm(self.kernel_op.gtoep.matvec(self.solution)-self.obs_data)**2
        self.stds['obs'] = np.std(self.kernel_op.gtoep.matvec(self.solution)-self.obs_data)
        for key in self.dxyz_constraint.keys():
            try:
                tmp2 = self.solution.reshape(self.dxyz_constraint[key].shape)
                if ('refer' in self.constraints_val.keys()) and (self.smooth_on == 'm-m0'):
                    tmp2 -= self.constraints_val['refer'].reshape(self.dxyz_constraint[key].shape)
                tmp2 = np.fft.ifftn(np.fft.fftn(tmp2)*self.dxyz_constraint[key]).real
                slices = [slice(None)]*tmp2.ndim
                slices[-1] = slice(0,self.dxyz_spaces[key])
                self.residuals[key] = np.linalg.norm(tmp2[tuple(slices)].ravel())**2
                self.stds[key] = np.std(tmp2[tuple(slices)].ravel())
            except KeyError:
                pass
        if 'refer' in self.constraints_val.keys():
            self.residuals['refer'] = np.linalg.norm(self.solution.ravel()-self.constraints_val['refer'].ravel())**2
            self.stds['refer'] = np.std(self.solution.ravel()-self.constraints_val['refer'].ravel())

    @timeit
    def calc_log_prior_total_det(self):
        self.log_prior_det_val = 0
        self.log_total_det_val = 0
        blocks = ['0','1','2','3']
        prior_eigs = np.zeros(self._nx*self._ny*self._nz)
        total_eigs = np.zeros(self._nx*self._ny*self._nz)
        step = self._nx*self._ny*self._nz//4
        try:
            depth_weight = self._weights['depth']
        except KeyError:
            depth_weight = 1.
        with h5py.File(self.fname,mode='r') as f:
            if 'depth' in self._weights.keys():
                depth_walsh = f['depth']['0'][:]
            for i_b,block in enumerate(blocks):
                tmp_block = np.zeros((step,step))
                for dxyz_name in self._smooth_components:
                    try:
                        dxyz_walsh = f[dxyz_name][block][:].reshape(step//self._nz,
                                                                    self._nz,
                                                                    step//self._nz,
                                                                    self._nz)
                        ein_path = np.einsum_path('mi,xiyj,jn->xmyn',
                                                  depth_walsh.T,
                                                  dxyz_walsh,
                                                  depth_walsh,
                                                  optimize='optimal')[0]
                        tmp_multi = np.einsum('mi,xiyj,jn->xmyn',
                                              depth_walsh.T,
                                              dxyz_walsh,
                                              depth_walsh,
                                              optimize=ein_path)
                        tmp_block += depth_weight*self._weights[dxyz_name]*tmp_multi.reshape(step,step)
                    except KeyError:
                        pass
                if 'refer' in self._weights.keys():
                    tmp_multi_small = depth_walsh.T@depth_walsh
                    for i in range(step//self._nz):
                        tmp_block[i*self._nz:(i+1)*self._nz,
                                  i*self._nz:(i+1)*self._nz] += depth_weight*self._weights['refer']*tmp_multi_small
                with cp.cuda.Device(2):
                    tmp_block_gpu = cp.asarray(tmp_block,dtype=np.float32)
                    eigs = cp.linalg.eigvalsh(tmp_block_gpu)
                    prior_eigs[i_b*step:(i_b+1)*step] = cp.asnumpy(eigs)
                    self.log_prior_det_val += cp.asnumpy(cp.sum(cp.log(eigs)))
                    tmp_block_gpu = None
                    eigs = None
                    free_gpu()
                tmp_block += self._weights['obs']*f['kernel'][block][:]
                with cp.cuda.Device(2):
                    tmp_block_gpu = cp.asarray(tmp_block,dtype=np.float32)
                    eigs = cp.linalg.eigvalsh(tmp_block_gpu)
                    total_eigs[i_b*step:(i_b+1)*step] = cp.asnumpy(eigs)
                    self.log_total_det_val += cp.asnumpy(cp.sum(cp.log(eigs)))
                    tmp_block_gpu = None
                    eigs = None
                    free_gpu()
        self.log_prior_det_val = cp.asnumpy(self.log_prior_det_val)
        self.log_total_det_val = cp.asnumpy(self.log_total_det_val)
        self.eigs = {'prior':prior_eigs,'total':total_eigs}
        return self.log_prior_det_val,self.log_total_det_val

    @timeit
    def calc_log_obs_det(self):
        self.log_obs_det_val = np.log(self._weights['obs'])*len(self.obs_data)
        return self.log_obs_det_val

    @timeit
    def calc_abic(self):
        '''-log_prior_det_value+log_total_det-log_obs_det+min_u'''
        self.calc_log_prior_total_det()
        self.calc_min_u()
        self.calc_log_obs_det()
        self.abic_val = (self.log_total_det_val
                         + self.min_u_val
                         - self.log_prior_det_val
                         - self.log_obs_det_val)
        return self.abic_val

    @timeit
    def para_grad(self,x):
        pass

    def u_bound(self):
        pass

    def print_summary(self):
        print('abic values:{}'.format(self.abic_val))
        print('log total det:{}'.format(self.log_total_det_val))
        print('log prior det:{}'.format(self.log_prior_det_val))
        print('log obs det:{}'.format(self.log_obs_det_val))
        print('min u:{}'.format(self.min_u_val))
        print('std:',end=' ')
        print(self.stds)
        print('1/var:',end=' ')
        print({k:1./v**2 for k,v in self.stds.items()})
        print('norms:',end=' ')
        print(self.residuals)
Code example #8
File: freqinv.py Project: mfkiwl/geoist
class FreqInvModel:
    def __init__(self,conf_file=None,**kwargs):
        self.confs = {'nzyx':[4,4,4],
                      'nobsyx':None,
                      'smooth_components':None,
                      'depth_scaling':None,
                      'model_density':None,
                      'refer_densities':None,
                      'weights':None,
                      'source_volume':None,
                      'obs_area':None,
                      'data_dir':'./'}
        confs = dict()
        if not conf_file is None:
            with open(conf_file) as f:
                confs = json.load(f)
        self.confs = {**self.confs,**confs,**kwargs}
        self.nz,self.ny,self.nx = self.confs['nzyx']
        if self.confs['nobsyx'] is None:
            self.nobsx = self.nx
            self.nobsy = self.ny
        else:
            self.nobsy,self.nobsx = self.confs['nobsyx']
        self.source_volume = self.confs['source_volume']
        self.obs_area= self.confs['obs_area']
        if self.confs['model_density'] is None:
            self._model_density = None
        else:
            self._model_density = self.confs['model_density'].ravel()
        self._smooth_components = self.confs['smooth_components']
        if self.confs['depth_scaling'] is None:
            self.depth_scaling = np.ones(self.nz)
        else:
            self.depth_scaling = self.confs['depth_scaling']
        self.refer_densities = self.confs['refer_densities']
        self._weights = self.confs['weights']
        self.smop = abic.SmoothOperator()
        self.kernel_op = None

    def gen_mesh(self,height = 0):
        ''' Generate mesh of the model.
        Args:
            height (float): height of the observations.
        '''
        shape = (self.nz, self.ny, self.nx)
        self.mesh = PrismMesh(self.source_volume, shape)
        self.mesh.addprop('density', self._model_density)
        # generate obs grid
        # coordinate: x North-South,y East-West
        # gridder is in the order: (nx,ny)
        self.gen_obs_grid(height=height)

    def gen_obs_grid(self,height=0):
        if self.obs_area is None:
            self.obs_area = (self.source_volume[0]+0.5*self.mesh.dims[0],
                              self.source_volume[1]-0.5*self.mesh.dims[0],
                              self.source_volume[2]+0.5*self.mesh.dims[1],
                              self.source_volume[3]-0.5*self.mesh.dims[1])
        obs_shape = (self.nobsx, self.nobsy)
        self.xp, self.yp, self.zp = gridder.regular(self.obs_area, obs_shape, z=height)

    def _pad_data(self, data, shape):
        n0 = pftrans._nextpow2(2*shape[0])
        n1 = pftrans._nextpow2(2*shape[1])
        nx, ny = shape
        padx = (n0 - nx)//2
        pady = (n1 - ny)//2
        padded = np.pad(data.reshape(shape), ((padx, padx), (pady, pady)),
                       mode='edge')
        return padded, padx, pady

    def gen_kernel(self,gtype='z'):
        xs = np.array(self.mesh.get_xs())
        ys = np.array(self.mesh.get_ys())
        zs = np.array(self.mesh.get_zs())
        x0 = (xs[:-1] + xs[1:])/2.0
        y0 = (ys[:-1] + ys[1:])/2.0
        a = np.abs(xs[:-1]-xs[1:])/2.0
        b = np.abs(ys[:-1]-ys[1:])/2.0
        nx, ny = self.nobsx, self.nobsy
        xmin, xmax, ymin, ymax = self.obs_area
        dx = (xmax - xmin)/(nx - 1)
        dy = (ymax - ymin)/(ny - 1)

        # Pad the array with the edge values to avoid instability
        shape = (nx,ny)
        self.padded, self.padx, self.pady = self._pad_data(self.zp, shape)
        nxe, nye = self.padded.shape
        M_left=(nxe-nx)/2+1
        M_right=M_left+nx-1
        N_down=(nye-ny)/2+1
        N_up=N_down+ny-1

        XXmin=xmin-dx*(M_left-1)
        XXmax=xmax+dx*(nxe-M_right)
        YYmin=ymin-dy*(N_down-1)
        YYmax=ymax+dy*(nye-N_up)
        # we store kx and ky as 1d array
        self.kx = 2*np.pi*np.array(np.fft.fftfreq(self.padded.shape[0], dx))
        self.ky = 2*np.pi*np.array(np.fft.fftfreq(self.padded.shape[1], dy))
        # kz is 2d array
        self.kz = np.sqrt(np.add.outer(self.ky**2,self.kx**2)).ravel()
        self.kxm = np.ma.array(self.kx, mask= self.kx==0)
        self.kym = np.ma.array(self.ky, mask= self.ky==0)
        self.kzm = np.ma.array(self.kz, mask= self.kz==0)

        complex1 = 0+1j
        ## zs should be depth or coordinate?
        self.C = -8*np.pi*G*SI2MGAL
        self.W = np.exp(self.kz*self.zp[0])
        if gtype == 'zz':
            self.W = self.kz * self.W
        self.dW =((np.exp(-np.outer(self.kz,zs[1:]))
                  -np.exp(-np.outer(self.kz,zs[:-1])))/self.kzm.reshape(-1,1))
        self.dW[self.kzm.mask,:] = 0.
        self.dW[self.kzm.mask,:] += zs[:-1] - zs[1:]
        self.dW = self.dW.data
        self.WX = np.exp(complex1*(XXmin-XXmax+xmax+xmin)*self.kx/2.)/dx
        #self.WX = np.ones_like(self.kx)/dx
        if gtype == 'zx':
            self.WX = complex1 * self.kx
        self.FX = np.sin(np.outer(self.kx,a))/self.kxm.reshape(-1,1)
        self.FX[self.kxm.mask,:] = 0.
        self.FX[self.kxm.mask,:] += a
        self.FX = self.FX * np.exp(-complex1*np.outer(self.kx,x0)) * self.WX.reshape(-1,1)
        self.FX = self.FX.data
        self.WY = np.exp(complex1*(YYmin-YYmax+ymax+ymin)*self.ky/2.)/dy
#        self.WY = np.ones_like(self.ky)/dy
        if gtype == 'zy':
            self.WY = complex1 * self.ky
        self.FY = np.sin(np.outer(self.ky,b))/self.kym.reshape(-1,1)
        self.FY[self.kym.mask,:] = 0.
        self.FY[self.kym.mask,:] += b
        self.FY = self.FY * np.exp(-complex1*np.outer(self.ky,y0)) * self.WY.reshape(-1,1)
        self.FY = self.FY.data
        self.solve_prepared = False
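    # Sketch of what the factors above represent (frequency-domain forward modelling):
    # dW holds the vertical decay of each layer between its top and bottom depths,
    # FX/FY combine the finite prism widths (sin(k*a)/k terms) with the prism centre
    # positions along x and y, W continues the field to the observation height, and C
    # collects the physical constants (output in mGal through SI2MGAL).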

    def _gen_rhs(self):
        # default pad 0s
        self.rhs = self.C*self.W*self.obs_freq.T.ravel()
        self.rhs = self.dW.T*self.rhs
#        self.rhs = kron_matvec([np.eye(self.nz),
#                                np.conj(self.FY.T),
#                                np.conj(self.FX.T)],
#                               self.rhs.ravel())
        self.rhs = kron_matvec([np.conj(self.FY.T),
                                np.conj(self.FX.T)],
                               self.rhs.reshape(self.nz,-1)).ravel()

    def forward(self,v=None,update=False):
        ''' Forward modelling gravity and its frequency representation
        Args:
            v (ndarray): model density, if not given, use self.model_density
            update (bool): If False, self.freq and self.obs_field won't be touched.
                           If True, self.freq and self.obs_field will be updated.
        Returns:
            freq (ndarray): gravity anomaly in the frequency domain.
            obs_field (ndarray): gravity field at the observation points.
        '''
        if v is None:
            v = self._model_density
        freq = kron_matvec([self.FY,self.FX],v.reshape(self.nz,-1))
        freq = freq * self.dW.T
        freq = self.W * np.sum(freq,axis=0)
        freq = self.C * freq.reshape(self.padded.shape[1],self.padded.shape[0]).T
        obs_field = np.real(np.fft.ifft2(freq))
        obs_field = obs_field[self.padx: self.padx + self.nobsx, self.pady: self.pady + self.nobsy].ravel()
        if update:
            self.freq = freq
            self.obs_field = obs_field
        return freq,obs_field
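    # Usage sketch (gen_mesh() and gen_kernel() must have been called first; 'model' is an
    # instance of this class):
    #
    #     freq, field = model.forward(update=True)
    #     grav = field.reshape(model.nobsx, model.nobsy)   # observation grid (x, y)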

    def _prepare_solve(self):
        if self.solve_prepared:
            return
        tmpX = self.FX @ np.conj(self.FX.T)
        tmpY = [email protected](self.FY.T)
        tmp = np.kron(tmpY,tmpX)
        self.woodSUB = np.zeros_like(tmp)
        for iz in range(self.nz):
            self.woodSUB += self.dW[:,iz].reshape(-1,1)*tmp*np.conj(self.dW[:,iz].reshape(1,-1))
        self.solve_prepared = True

    def do_linear_solve_quiet(self):
        self._gen_rhs()
        if not self.solve_prepared:
            self._prepare_solve()
        weight = np.sum(self._weights['refers'])
        invSUB = self.C**2*weight*self.woodSUB
        invSUB_diag = np.einsum('ii->i',invSUB)
        invSUB_diag += weight**2 * 1./self.W**2
        invSUB = np.linalg.inv(invSUB)
        self.solution = kron_matvec([self.FY,self.FX],self.rhs.reshape(self.nz,-1))
        self.solution = np.sum(self.solution * self.dW.T, axis=0)
        self.solution = invSUB @ self.solution
        self.solution = self.dW.T*self.solution.reshape(1,-1)
        self.solution = kron_matvec([np.conj(self.FY.T),np.conj(self.FX.T)],
                                    self.solution.reshape(self.nz,-1)).ravel()
        self.solution = self.rhs/weight - self.C**2*self.solution
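    # The solve above appears to follow a Woodbury-type identity: woodSUB collects the
    # dense system in the (padded) data-frequency space, so only a matrix of that size is
    # inverted instead of one of size (number of model cells), and the model-space
    # solution is recovered from rhs/weight minus the back-projected correction.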

    def calc_res(self):
        pass