Example #1
    def test_haloupdate_not_required(self):
        grid = Grid(shape=(4, 4))
        u = TimeFunction(name='u',
                         grid=grid,
                         space_order=4,
                         time_order=2,
                         save=None)
        v = TimeFunction(name='v',
                         grid=grid,
                         space_order=0,
                         time_order=0,
                         save=5)
        g = Function(name='g', grid=grid, space_order=0)
        i = Function(name='i', grid=grid, space_order=0)

        shift = Constant(name='shift', dtype=np.int32)

        step = Eq(u.forward, u - u.backward + 1)
        g_inc = Inc(g, u * v.subs(grid.time_dim, grid.time_dim - shift))
        i_inc = Inc(i, (v * v).subs(grid.time_dim, grid.time_dim - shift))

        op = Operator([step, g_inc, i_inc])

        # No stencil in the expressions, so no halo update required!
        calls = FindNodes(Call).visit(op)
        assert len(calls) == 0
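# For contrast, a minimal hedged sketch (not part of the test above): an
# expression that does contain a stencil, for which the generated Operator
# would include halo-exchange calls when run with MPI enabled.
from devito import Grid, TimeFunction, Eq, Operator

grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, space_order=4)

# `u.laplace` reads neighbouring grid points, hence a halo update is required
# whenever the data is distributed across MPI ranks.
op = Operator(Eq(u.forward, u.laplace + 1))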
Example #2
def GradientOperator(model, source, receiver, space_order=4, save=True,
                     kernel='OT2', **kwargs):
    """
    Constructor method for the gradient operator in an acoustic media

    :param model: :class:`Model` object containing the physical parameters
    :param source: :class:`PointData` object containing the source geometry
    :param receiver: :class:`PointData` object containing the acquisition geometry
    :param time_order: Time discretization order
    :param space_order: Space discretization order
    """
    m, damp = model.m, model.damp

    # Gradient symbol and wavefield symbols
    grad = Function(name='grad', grid=model.grid)
    u = TimeFunction(name='u', grid=model.grid, save=source.nt if save
                     else None, time_order=2, space_order=space_order)
    v = TimeFunction(name='v', grid=model.grid, save=None,
                     time_order=2, space_order=space_order)
    rec = Receiver(name='rec', grid=model.grid,
                   time_range=receiver.time_range, npoint=receiver.npoint)

    s = model.grid.stepping_dim.spacing
    eqn = iso_stencil(v, m, s, damp, kernel, forward=False)

    if kernel == 'OT2':
        gradient_update = Inc(grad, - u.dt2 * v)
    elif kernel == 'OT4':
        gradient_update = Inc(grad, - (u.dt2 + s**2 / 12.0 * u.laplace2(m**(-2))) * v)
    # Add expression for receiver injection
    receivers = rec.inject(field=v.backward, expr=rec * s**2 / m)

    # Substitute spacing terms to reduce flops
    return Operator(eqn + receivers + [gradient_update], subs=model.spacing_map,
                    name='Gradient', **kwargs)
Example #3
    def test_incs_no_atomic(self):
        """
        Test that `Inc`s don't get a `#pragma omp atomic` when the increment
        is performed along a fully parallel loop.
        """
        grid = Grid(shape=(8, 8, 8))
        x, y, z = grid.dimensions
        t = grid.stepping_dim

        f = Function(name='f', grid=grid)
        u = TimeFunction(name='u', grid=grid)
        v = TimeFunction(name='v', grid=grid)

        # Format: u(t, x, nastiness) += 1
        uf = u[t, x, f, z]

        # All loops get collapsed, but the `y` and `z` loops are PARALLEL_IF_ATOMIC,
        # hence an atomic pragma is expected
        op0 = Operator(Inc(uf, 1),
                       opt=('advanced', {
                           'openmp': True,
                           'par-collapse-ncores': 1
                       }))
        assert 'collapse(3)' in str(op0)
        assert 'atomic' in str(op0)

        # Now only `x` is parallelized
        op1 = Operator([Eq(v[t, x, 0, 0], v[t, x, 0, 0] + 1),
                        Inc(uf, 1)],
                       opt=('advanced', {
                           'openmp': True,
                           'par-collapse-ncores': 1
                       }))
        assert 'collapse(1)' in str(op1)
        assert 'atomic' not in str(op1)
Example #4
def GradientOperator(model,
                     geometry,
                     space_order=4,
                     save=True,
                     kernel='OT2',
                     **kwargs):
    """
    Construct a gradient operator in an acoustic medium.

    Parameters
    ----------
    model : Model
        Object containing the physical parameters.
    geometry : AcquisitionGeometry
        Geometry object that contains the source (SparseTimeFunction) and
        receivers (SparseTimeFunction) and their position.
    space_order : int, optional
        Space discretization order.
    save : int or Buffer, optional
        Option to store the entire (unrolled) wavefield.
    kernel : str, optional
        Type of discretization, centered or shifted.
    """
    m, damp = model.m, model.damp

    # Gradient symbol and wavefield symbols
    grad = Function(name='grad', grid=model.grid)
    u = TimeFunction(name='u',
                     grid=model.grid,
                     save=geometry.nt if save else None,
                     time_order=2,
                     space_order=space_order)
    v = TimeFunction(name='v',
                     grid=model.grid,
                     save=None,
                     time_order=2,
                     space_order=space_order)
    rec = Receiver(name='rec',
                   grid=model.grid,
                   time_range=geometry.time_axis,
                   npoint=geometry.nrec)

    s = model.grid.stepping_dim.spacing
    eqn = iso_stencil(v, m, s, damp, kernel, forward=False)

    if kernel == 'OT2':
        gradient_update = Inc(grad, -u.dt2 * v)
    elif kernel == 'OT4':
        gradient_update = Inc(
            grad, -(u.dt2 + s**2 / 12.0 * u.biharmonic(m**(-2))) * v)
    # Add expression for receiver injection
    receivers = rec.inject(field=v.backward, expr=rec * s**2 / m)

    # Substitute spacing terms to reduce flops
    return Operator(eqn + receivers + [gradient_update],
                    subs=model.spacing_map,
                    name='Gradient',
                    **kwargs)
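# A hedged usage sketch of the GradientOperator above (not from the source).
# `model`, `geometry`, the saved forward wavefield `u0` and the data residual
# `residual` are assumed to come from the usual acoustic forward/adjoint setup;
# runtime keyword names follow the symbols declared inside the operator.
from devito import Function

op = GradientOperator(model, geometry, space_order=4, save=True)
grad = Function(name='grad', grid=model.grid)
op.apply(u=u0, rec=residual, grad=grad, dt=model.critical_dt)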
Example #5
def forward_freq_modeling(model, src_coords, wavelet, rec_coords, freq, space_order=8, nb=40, dt=None, factor=None):
    # Forward modeling with on-the-fly DFT of forward wavefields
    clear_cache()

    # Parameters
    nt = wavelet.shape[0]
    if dt is None:
        dt = model.critical_dt
    m, rho, damp = model.m, model.rho, model.damp

    freq_dim = Dimension(name='freq_dim')
    time = model.grid.time_dim
    if factor is None:
        factor = int(1 / (dt*4*np.max(freq)))
    if factor == 1:
        tsave = time
    else:
        tsave = ConditionalDimension(name='tsave', parent=model.grid.time_dim, factor=factor)
    print("DFT subsampling factor: ", factor)

    # Create wavefields
    nfreq = freq.shape[0]
    u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=space_order)
    f = Function(name='f', dimensions=(freq_dim,), shape=(nfreq,))
    f.data[:] = freq[:]
    ufr = Function(name='ufr', dimensions=(freq_dim,) + u.indices[1:], shape=(nfreq,) + model.shape_domain)
    ufi = Function(name='ufi', dimensions=(freq_dim,) + u.indices[1:], shape=(nfreq,) + model.shape_domain)

    ulaplace, rho = acoustic_laplacian(u, rho)

    # Set up PDE and rearrange
    stencil = damp * (2.0 * u - damp * u.backward + dt**2 * rho / m * ulaplace)
    expression = [Eq(u.forward, stencil)]
    expression += [Inc(ufr, factor*u*cos(2*np.pi*f*tsave*factor*dt))]
    expression += [Inc(ufi, -factor*u*sin(2*np.pi*f*tsave*factor*dt))]

    # Source symbol with input wavelet
    src = PointSource(name='src', grid=model.grid, ntime=nt, coordinates=src_coords)
    src.data[:] = wavelet[:]
    src_term = src.inject(field=u.forward, expr=src * dt**2 / m)

    # Data is sampled at receiver locations
    rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)
    rec_term = rec.interpolate(expr=u)

    # Create operator and run
    set_log_level('ERROR')
    expression += src_term + rec_term
    subs = model.spacing_map
    subs[u.grid.time_dim.spacing] = dt
    op = Operator(expression, subs=subs, dse='advanced', dle='advanced',
                  name="Forward%s" % randint(1e5))
    op()

    return rec.data, ufr, ufi
Example #6
def otf_dft(u, freq, dt, factor=None):
    """
    On the fly DFT wavefield (frequency slices) and expression

    Parameters
    ----------
    u: TimeFunction or Tuple
        Forward wavefield
    freq: Array
        Array of frequencies for on-the-fly DFT
    factor: int
        Subsampling factor for DFT
    """
    if freq is None:
        return [], None

    # init
    dft_modes = []

    # Subsampled dft time axis
    time = as_tuple(u)[0].grid.time_dim
    tsave, factor = sub_time(time, factor, dt=dt, freq=freq)

    # Frequencies
    nfreq = np.shape(freq)[0]
    freq_dim = DefaultDimension(name='freq_dim', default_value=nfreq)
    f = Function(name='f', dimensions=(freq_dim, ), shape=(nfreq, ))
    f.data[:] = np.array(freq[:])
    # Precompute the Fourier atoms so they are not recomputed at every grid point
    cf = TimeFunction(name="cf",
                      dimensions=(as_tuple(u)[0].grid.stepping_dim, freq_dim),
                      shape=(3, nfreq),
                      time_order=2)
    sf = TimeFunction(name="sf",
                      dimensions=(as_tuple(u)[0].grid.stepping_dim, freq_dim),
                      shape=(3, nfreq),
                      time_order=2)
    # Pulsation
    omega_t = 2 * np.pi * f * tsave * factor * dt
    dft = [Eq(cf, cos(omega_t)), Eq(sf, sin(omega_t))]
    for wf in as_tuple(u):
        ufr = Function(name='ufr%s' % wf.name,
                       dimensions=(freq_dim, ) + wf.indices[1:],
                       grid=wf.grid,
                       shape=(nfreq, ) + wf.shape[1:])
        ufi = Function(name='ufi%s' % wf.name,
                       dimensions=(freq_dim, ) + wf.indices[1:],
                       grid=wf.grid,
                       shape=(nfreq, ) + wf.shape[1:])
        dft += [Inc(ufr, factor * cf * wf)]
        dft += [Inc(ufi, -factor * sf * wf)]
        dft_modes += [(ufr, ufi)]
    return dft, dft_modes
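# A hedged sketch of how otf_dft's output might be scheduled (not from the
# source). `u` is the forward wavefield, `update` its time-stepping right-hand
# side, and `nt` the number of time steps; all three are assumptions here.
dft, dft_modes = otf_dft(u, freq, dt, factor=4)
op = Operator([Eq(u.forward, update)] + dft)
op.apply(time_M=nt - 2)
ufr, ufi = dft_modes[0]   # real/imaginary frequency slices of `u`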
Example #7
def grad(nx, ny, nchi, ncho, n, m, xdat, dydat):

    # Image size
    dt = np.float32
    x, y, ci, co = (SpaceDimension("x"), SpaceDimension("y"), Dimension("ci"),
                    Dimension("co"))
    grid = Grid((nx, ny), dtype=dt, dimensions=(x, y))

    # Image
    X = Function(name="xin", dimensions=(ci, x, y),
                 shape=(nchi, nx, ny), grid=grid, space_order=n//2)

    # Output
    dy = Function(name="dy", dimensions=(co, x, y),
                  shape=(ncho, nx, ny), grid=grid, space_order=n//2)

    # Weights
    i, j = Dimension("i"), Dimension("j")
    dW = Function(name="dW", dimensions=(co, ci, i, j),
                  shape=(ncho, nchi, n, m), grid=grid)

    # Gradient
    grad_eq = Inc(dW[co, ci, i, j], dy[co, x, y]*X[ci, x+i-n//2, y+j-m//2])
    op = Operator(grad_eq)
    op.cfunction

    X.data[:] = xdat[:]
    dy.data[:] = dydat[:]

    return op
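# A hypothetical driver for the convolution-gradient kernel above. `xdat` and
# `dydat` are assumed to be numpy arrays of shape (nchi, nx, ny) and
# (ncho, nx, ny); running the operator accumulates the weight gradient into the
# `dW` Function built inside `grad`.
op = grad(nx=64, ny=64, nchi=4, ncho=4, n=3, m=3, xdat=xdat, dydat=dydat)
op.apply()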
Example #8
    def test_array_reduction(self, so, dim):
        """
        Test generation of OpenMP reduction clauses involving Functions.
        """
        grid = Grid(shape=(3, 3, 3))
        d = grid.dimensions[dim]

        f = Function(name='f',
                     shape=(3, ),
                     dimensions=(d, ),
                     grid=grid,
                     space_order=so)
        u = TimeFunction(name='u', grid=grid)

        op = Operator(Inc(f, u + 1),
                      opt=('openmp', {
                          'par-collapse-ncores': 1
                      }))

        iterations = FindNodes(Iteration).visit(op)
        assert "reduction(+:f[0:f_vec->size[0]])" in iterations[1].pragmas[
            0].value

        try:
            op(time_M=1)
        except Exception:
            # Older gcc versions (< 6.1) don't support reductions on arrays
            info("Unsupported older gcc version for array reduction")
            return

        assert np.allclose(f.data, 18)
Example #9
def GradientOperator(model,
                     geometry,
                     space_order=4,
                     kernel='sls',
                     time_order=2,
                     save=True,
                     **kwargs):
    """
    Construct a gradient operator in a visco-acoustic medium.

    Parameters
    ----------
    model : Model
        Object containing the physical parameters.
    geometry : AcquisitionGeometry
        Geometry object that contains the source (SparseTimeFunction) and
        receivers (SparseTimeFunction) and their position.
    space_order : int, optional
        Space discretization order.
    save : int or Buffer, optional
        Option to store the entire (unrolled) wavefield.
    kernel : str, optional
        Selects a visco-acoustic equation from the options below:
        sls (Standard Linear Solid) -
            1st order: Blanch and Symes (1995) / Dutta and Schuster (2014)
            viscoacoustic equation
            2nd order: Bai et al. (2014) viscoacoustic equation
        ren - Ren et al. (2014) viscoacoustic equation
        deng_mcmechan - Deng and McMechan (2007) viscoacoustic equation
        Defaults to sls 2nd order.
    """
    # Gradient symbol and wavefield symbols
    save_t = geometry.nt if save else None

    grad = Function(name='grad', grid=model.grid)
    p = TimeFunction(name='p',
                     grid=model.grid,
                     time_order=2,
                     space_order=space_order,
                     save=save_t,
                     staggered=NODE)
    pa = TimeFunction(name='pa',
                      grid=model.grid,
                      time_order=time_order,
                      space_order=space_order,
                      staggered=NODE)

    # Equations kernels
    eq_kernel = kernels[kernel]
    eqn = eq_kernel(model, geometry, pa, forward=False, save=False, **kwargs)

    gradient_update = Inc(grad, -p.dt2 * pa)

    # Add expression for receiver injection
    _, recterm = src_rec(pa, model, geometry, forward=False)

    # Substitute spacing terms to reduce flops
    return Operator(eqn + recterm + [gradient_update],
                    subs=model.spacing_map,
                    name='Gradient',
                    **kwargs)
Example #10
    def test_xcor_from_saved(self, opt, gpu_fit):
        nt = 10
        grid = Grid(shape=(300, 300, 300))
        time_dim = grid.time_dim

        period = 2
        factor = Constant(name='factor', value=period, dtype=np.int32)
        time_sub = ConditionalDimension(name="time_sub", parent=time_dim, factor=factor)

        g = Function(name='g', grid=grid)
        v = TimeFunction(name='v', grid=grid)
        usave = TimeFunction(name='usave', grid=grid, time_order=0,
                             save=int(nt//factor.data), time_dim=time_sub)
        # For the given `nt` and grid shape, `usave` is roughly 4*5*300**3 bytes ≈ 0.5GB of data

        for i in range(int(nt//period)):
            usave.data[i, :] = i
        v.data[:] = i*2 + 1

        # Assuming nt//period=5, we are computing, over 5 iterations:
        # g = 4*4  [time=8] + 3*3 [time=6] + 2*2 [time=4] + 1*1 [time=2]
        op = Operator([Eq(v.backward, v - 1), Inc(g, usave*(v/2))],
                      opt=(opt, {'gpu-fit': usave if gpu_fit else None}))

        op.apply(time_M=nt-1)

        assert np.all(g.data == 30)
Example #11
def otf_dft(u, freq, dt, factor=None):
    """
    On the fly DFT wavefield (frequency slices) and expression

    Parameters
    ----------
    u: TimeFunction or Tuple
        Forward wavefield
    freq: Array
        Array of frequencies for on-the-fly DFT
    factor: int
        Subsampling factor for DFT
    """
    if freq is None:
        return [], None

    # init
    dft = []
    dft_modes = []

    # Subsampled dft time axis
    time = as_tuple(u)[0].grid.time_dim
    tsave, factor = sub_time(time, factor, dt=dt, freq=freq)

    # Frequencies
    nfreq = freq.shape[0]
    freq_dim = DefaultDimension(name='freq_dim', default_value=nfreq)
    f = Function(name='f', dimensions=(freq_dim, ), shape=(nfreq, ))
    f.data[:] = freq[:]

    # Pulsation
    omega_t = 2 * np.pi * f * tsave * factor * dt
    for wf in as_tuple(u):
        ufr = Function(name='ufr%s' % wf.name,
                       dimensions=(freq_dim, ) + wf.indices[1:],
                       grid=wf.grid,
                       shape=(nfreq, ) + wf.shape[1:])
        ufi = Function(name='ufi%s' % wf.name,
                       dimensions=(freq_dim, ) + wf.indices[1:],
                       grid=wf.grid,
                       shape=(nfreq, ) + wf.shape[1:])
        dft += [Inc(ufr, factor * cos(omega_t) * wf)]
        dft += [Inc(ufi, -factor * sin(omega_t) * wf)]
        dft_modes += [(ufr, ufi)]
    return dft, dft_modes
Example #12
    def test_from_different_nests(self):
        """
        Check that aliases arising from two sets of equations A and B,
        characterized by a flow dependence, are scheduled within A's and B's
        loop nests respectively.
        """
        grid = Grid(shape=(3, 3, 3))
        x, y, z = grid.dimensions  # noqa
        t = grid.stepping_dim
        i = Dimension(name='i')

        f = Function(name='f', grid=grid)
        f.data_with_halo[:] = 1.
        g = Function(name='g', shape=(3, ), dimensions=(i, ))
        g.data[:] = 2.
        u = TimeFunction(name='u', grid=grid, space_order=3)
        v = TimeFunction(name='v', grid=grid, space_order=3)

        # Leads to 3D aliases
        eqns = [
            Eq(u.forward,
               ((u[t, x, y, z] + u[t, x + 1, y + 1, z + 1]) * 3 * f +
                (u[t, x + 2, y + 2, z + 2] + u[t, x + 3, y + 3, z + 3]) * 3 * f
                + 1)),
            Inc(u[t + 1, i, i, i], g + 1),
            Eq(v.forward,
               ((v[t, x, y, z] + v[t, x + 1, y + 1, z + 1]) * 3 * u.forward +
                (v[t, x + 2, y + 2, z + 2] + v[t, x + 3, y + 3, z + 3]) * 3 *
                u.forward + 1))
        ]
        op0 = Operator(eqns, dse='noop', dle=('noop', {'openmp': True}))
        op1 = Operator(eqns,
                       dse='aggressive',
                       dle=('advanced', {
                           'openmp': True
                       }))

        # Check code generation
        assert 'bf0' in op1._func_table
        assert 'bf1' in op1._func_table
        trees = retrieve_iteration_tree(op1._func_table['bf0'].root)
        assert len(trees) == 2
        assert trees[0][-1].nodes[0].body[0].write.is_Array
        assert trees[1][-1].nodes[0].body[0].write is u
        trees = retrieve_iteration_tree(op1._func_table['bf1'].root)
        assert len(trees) == 2
        assert trees[0][-1].nodes[0].body[0].write.is_Array
        assert trees[1][-1].nodes[0].body[0].write is v

        # Check numerical output
        op0(time_M=1)
        exp = np.copy(u.data[:])
        u.data_with_halo[:] = 0.
        op1(time_M=1)
        assert np.all(u.data == exp)
Example #13
def initialize_damp(damp, padsizes, spacing, abc_type="damp", fs=False):
    """
    Initialize damping field with an absorbing boundary layer.

    Parameters
    ----------
    damp : Function
        The damping field for absorbing boundary condition.
    nbl : int
        Number of points in the damping layer.
    spacing :
        Grid spacing coefficient.
    mask : bool, optional
        whether the dampening is a mask or layer.
        mask => 1 inside the domain and decreases in the layer
        not mask => 0 inside the domain and increase in the layer
    """

    eqs = [Eq(damp, 1.0 if abc_type == "mask" else 0.0)]
    for (nbl, nbr), d in zip(padsizes, damp.dimensions):
        if not fs or d is not damp.dimensions[-1]:
            dampcoeff = 1.5 * np.log(1.0 / 0.001) / (nbl)
            # left
            dim_l = SubDimension.left(name='abc_%s_l' % d.name,
                                      parent=d,
                                      thickness=nbl)
            pos = Abs((nbl - (dim_l - d.symbolic_min) + 1) / float(nbl))
            val = dampcoeff * (pos - sin(2 * np.pi * pos) / (2 * np.pi))
            val = -val if abc_type == "mask" else val
            eqs += [Inc(damp.subs({d: dim_l}), val / d.spacing)]
        # right
        dampcoeff = 1.5 * np.log(1.0 / 0.001) / (nbr)
        dim_r = SubDimension.right(name='abc_%s_r' % d.name,
                                   parent=d,
                                   thickness=nbr)
        pos = Abs((nbr - (d.symbolic_max - dim_r) + 1) / float(nbr))
        val = dampcoeff * (pos - sin(2 * np.pi * pos) / (2 * np.pi))
        val = -val if abc_type == "mask" else val
        eqs += [Inc(damp.subs({d: dim_r}), val / d.spacing)]

    Operator(eqs, name='initdamp')()
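# A hedged usage sketch (not from the source), assuming a small 2D grid with a
# 10-point absorbing layer on every side:
from devito import Grid, Function

grid = Grid(shape=(60, 60))
damp = Function(name='damp', grid=grid)
initialize_damp(damp, padsizes=[(10, 10), (10, 10)], spacing=grid.spacing,
                abc_type="damp")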
Example #14
def initialize_damp(damp, nbl, spacing, mask=False):
    """
    Initialise damping field with an absorbing boundary layer.

    Parameters
    ----------
    damp : Function
        The damping field for absorbing boundary condition.
    nbl : int
        Number of points in the damping layer.
    spacing :
        Grid spacing coefficient.
    mask : bool, optional
        Whether the damping is a mask or a layer.
        mask => 1 inside the domain and decreases in the layer
        not mask => 0 inside the domain and increases in the layer
    """
    dampcoeff = 1.5 * np.log(1.0 / 0.001) / (40)

    eqs = [Eq(damp, 1.0)] if mask else []
    for d in damp.dimensions:
        # left
        dim_l = SubDimension.left(name='abc_%s_l' % d.name,
                                  parent=d,
                                  thickness=nbl)
        pos = Abs((nbl - (dim_l - d.symbolic_min) + 1) / float(nbl))
        val = dampcoeff * (pos - sin(2 * np.pi * pos) / (2 * np.pi))
        val = -val if mask else val
        eqs += [Inc(damp.subs({d: dim_l}), val / d.spacing)]
        # right
        dim_r = SubDimension.right(name='abc_%s_r' % d.name,
                                   parent=d,
                                   thickness=nbl)
        pos = Abs((nbl - (d.symbolic_max - dim_r) + 1) / float(nbl))
        val = dampcoeff * (pos - sin(2 * np.pi * pos) / (2 * np.pi))
        val = -val if mask else val
        eqs += [Inc(damp.subs({d: dim_r}), val / d.spacing)]

    # TODO: Figure out why yask doesn't like it with dse/dle
    Operator(eqs, name='initdamp', dse='noop', dle='noop')()
Example #15
def JacobianAdjOperator(model, geometry, space_order=4,
                        save=True, **kwargs):
    """
    Construct a linearized JacobianAdjoint modeling Operator in a TTI medium.

    Parameters
    ----------
    model : Model
        Object containing the physical parameters.
    geometry : AcquisitionGeometry
        Geometry object that contains the source (SparseTimeFunction) and
        receivers (SparseTimeFunction) and their position.
    space_order : int, optional
        Space discretization order.
    save : int or Buffer, optional
        Option to store the entire (unrolled) wavefield.
    """
    dt = model.grid.stepping_dim.spacing
    m = model.m
    time_order = 2

    # Gradient symbol and wavefield symbols
    u0 = TimeFunction(name='u0', grid=model.grid, save=geometry.nt if save
                      else None, time_order=time_order, space_order=space_order)
    v0 = TimeFunction(name='v0', grid=model.grid, save=geometry.nt if save
                      else None, time_order=time_order, space_order=space_order)

    du = TimeFunction(name="du", grid=model.grid, save=None,
                      time_order=time_order, space_order=space_order)
    dv = TimeFunction(name="dv", grid=model.grid, save=None,
                      time_order=time_order, space_order=space_order)

    dm = Function(name="dm", grid=model.grid)

    rec = Receiver(name='rec', grid=model.grid, time_range=geometry.time_axis,
                   npoint=geometry.nrec)

    # FD kernels of the PDE
    FD_kernel = kernels[('centered', len(model.shape))]
    eqn = FD_kernel(model, du, dv, space_order, forward=False)

    dm_update = Inc(dm, - (u0 * du.dt2 + v0 * dv.dt2))

    # Add expression for receiver injection
    rec_term = rec.inject(field=du.backward, expr=rec * dt**2 / m)
    rec_term += rec.inject(field=dv.backward, expr=rec * dt**2 / m)

    # Substitute spacing terms to reduce flops
    return Operator(eqn + rec_term + [dm_update], subs=model.spacing_map,
                    name='GradientTTI', **kwargs)
Example #16
def test_nofission_as_illegal():
    """
    Test that no fission is applied if it would break data dependencies.
    """
    grid = Grid(shape=(20, 20))
    x, y = grid.dimensions

    f = Function(name='f', grid=grid, dimensions=(y, ), shape=(20, ))
    u = TimeFunction(name='u', grid=grid)
    v = TimeFunction(name='v', grid=grid)

    eqns = [Inc(f, v + 1.), Eq(u.forward, f[y + 1] + 1.)]

    op = Operator(eqns, opt='fission')

    assert_structure(op, ['t,x,y', 't,x,y'], 't,x,y,y')
Example #17
    def test_avoid_redundant_haloupdate(self):
        grid = Grid(shape=(12,))
        x = grid.dimensions[0]
        t = grid.stepping_dim

        i = Dimension(name='i')
        j = Dimension(name='j')

        f = TimeFunction(name='f', grid=grid)
        g = Function(name='g', grid=grid)

        op = Operator([Eq(f.forward, f[t, x-1] + f[t, x+1] + 1.),
                       Inc(f[t+1, i], 1.),  # no halo update as it's an Inc
                       Eq(g, f[t, j] + 1)])  # access `f` at `t`, not `t+1`!

        calls = FindNodes(Call).visit(op)
        assert len(calls) == 1
Example #18
    def test_array_reduction(self, so, dim):
        """
        Test generation of OpenMP reduction clauses involving Functions.
        """
        grid = Grid(shape=(3, 3, 3))
        d = grid.dimensions[dim]

        f = Function(name='f',
                     shape=(3, ),
                     dimensions=(d, ),
                     grid=grid,
                     space_order=so)
        u = TimeFunction(name='u', grid=grid)

        op = Operator(Inc(f, u + 1),
                      opt=('openmp', {
                          'par-collapse-ncores': 1
                      }))

        iterations = FindNodes(Iteration).visit(op)
        parallelized = iterations[dim + 1]
        assert parallelized.pragmas
        if parallelized is iterations[-1]:
            # With the `f[z] += u[t0][x + 1][y + 1][z + 1] + 1` expr, the innermost
            # `z` Iteration gets parallelized, nothing is collapsed, hence no
            # reduction is required
            assert "reduction" not in parallelized.pragmas[0].value
        elif Ompizer._support_array_reduction(configuration['compiler']):
            assert "reduction(+:f[0:f_vec->size[0]])" in parallelized.pragmas[
                0].value
        else:
            # E.g. old GCC's
            assert "atomic update" in str(iterations[-1])

        try:
            op(time_M=1)
        except Exception:
            # Older gcc versions (< 6.1) don't support reductions on arrays
            info("Unsupported older gcc version for array reduction")
            return

        assert np.allclose(f.data, 18)
Example #19
    def test_timeparallel_reduction(self):
        grid = Grid(shape=(3, 3, 3))
        i = Dimension(name='i')

        f = Function(name='f', shape=(1,), dimensions=(i,), grid=grid)
        u = TimeFunction(name='u', grid=grid)

        op = Operator(Inc(f[0], u + 1), opt='noop')

        trees = retrieve_iteration_tree(op)
        assert len(trees) == 1
        tree = trees[0]
        assert all(i.is_ParallelRelaxed and not i.is_Parallel for i in tree)

        # The time loop is not in OpenMP canonical form, so it won't be parallelized
        assert not tree.root.pragmas
        assert len(tree[1].pragmas) == 1
        assert tree[1].pragmas[0].value ==\
            'omp target teams distribute parallel for collapse(3) reduction(+:f[0])'
Example #20
    def test_simd_space_invariant(self):
        """
        Similar to test_space_invariant_v3, testing simd vectorization happens
        in the correct place.
        """
        grid = Grid(shape=(10, 10, 10))
        x, y, z = grid.dimensions

        f = Function(name='f', grid=grid)
        eq = Inc(f, cos(x * y) + cos(x * z))

        op = Operator(eq, opt=('advanced', {'openmp': True}))
        iterations = FindNodes(Iteration).visit(op)

        assert 'omp for collapse(1) schedule(static,1)' in iterations[0].pragmas[0].value
        assert 'omp simd' in iterations[1].pragmas[0].value
        assert 'omp simd' in iterations[3].pragmas[0].value

        op.apply()
        assert np.isclose(np.linalg.norm(f.data), 37.1458, rtol=1e-5)
Example #21
def weighted_norm(u, weight=None):
    """
    Space-time norm of a wavefield, split into norm in time first then in space to avoid
    breaking loops

    Parameters
    ----------
    u: TimeFunction or Tuple of TimeFunction
        Wavefield to take the norm of
    weight: String
        Spacial weight to apply
    """
    grid = as_tuple(u)[0].grid
    expr = grid.time_dim.spacing * sum(uu**2 for uu in as_tuple(u))
    # Norm in time
    norm_vy2_t = Function(name="nvy2t", grid=grid, space_order=0)
    n_t = [Eq(norm_vy2_t, norm_vy2_t + expr)]
    # Then norm in space
    i = Dimension(name="i", )
    norm_vy2 = Function(name="nvy2", shape=(1, ), dimensions=(i, ), grid=grid)
    w = weight or 1
    n_s = [Inc(norm_vy2[0], norm_vy2_t / w**2)]
    # Return norm object and expr
    return norm_vy2, (n_t, n_s)
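# A hedged sketch of how weighted_norm's output might be used (not from the
# source). `u` is a TimeFunction, `eqns` its time-stepping equations and `nt`
# the number of time steps; all are assumptions here.
nvy2, (n_t, n_s) = weighted_norm(u)
op = Operator(eqns + n_t + n_s)
op.apply(time_M=nt - 1)
print(nvy2.data[0])   # accumulated space-time norm of `u`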
Example #22
def subsampled_gradient(factor=1, tn=2000.):
    t0 = 0.  # Simulation starts at t=0

    shape = (100, 100)
    origin = (0., 0.)

    spacing = (15., 15.)

    space_order = 4

    vp = np.empty(shape, dtype=np.float64)
    vp[:, :51] = 1.5
    vp[:, 51:] = 2.5

    model = Model(vp=vp, origin=origin, shape=shape, spacing=spacing,
                  space_order=space_order, nbl=10)

    dt = model.critical_dt  # Time step from model grid spacing
    time_range = TimeAxis(start=t0, stop=tn, step=dt)
    nt = time_range.num  # number of time steps

    f0 = 0.010  # Source peak frequency is 10Hz (0.010 kHz)
    src = RickerSource(
        name='src',
        grid=model.grid,
        f0=f0,
        time_range=time_range)

    src.coordinates.data[0, :] = np.array(model.domain_size) * .5
    src.coordinates.data[0, -1] = 20.  # Depth is 20m

    rec = Receiver(
        name='rec',
        grid=model.grid,
        npoint=101,
        time_range=time_range)  # new
    rec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=101)
    rec.coordinates.data[:, 1] = 20.  # Depth is 20m

    save_elements = (nt + factor - 1) // factor

    print(save_elements)

    time_subsampled = ConditionalDimension(
        't_sub', parent=model.grid.time_dim, factor=factor)
    usave = TimeFunction(name='usave', grid=model.grid, time_order=2,
                         space_order=space_order, save=save_elements,
                         time_dim=time_subsampled)

    u = TimeFunction(name="u", grid=model.grid, time_order=2,
                     space_order=space_order)
    pde = model.m * u.dt2 - u.laplace + model.damp * u.dt
    stencil = Eq(u.forward, solve(pde, u.forward))
    src_term = src.inject(
        field=u.forward,
        expr=src * dt**2 / model.m,
        offset=model.nbl)
    rec_term = rec.interpolate(expr=u, offset=model.nbl)

    fwd_op = Operator([stencil] + src_term + [Eq(usave, u)] + rec_term,
                      subs=model.spacing_map)  # operator with snapshots
    v = TimeFunction(name='v', grid=model.grid, save=None,
                     time_order=2, space_order=space_order)
    grad = Function(name='grad', grid=model.grid)

    rev_pde = model.m * v.dt2 - v.laplace + model.damp * v.dt.T
    rev_stencil = Eq(v.backward, solve(rev_pde, v.backward))
    gradient_update = Inc(grad, - usave.dt2 * v)

    s = model.grid.stepping_dim.spacing

    receivers = rec.inject(field=v.backward, expr=rec*s**2/model.m)
    rev_op = Operator([rev_stencil] + receivers + [gradient_update],
                      subs=model.spacing_map)

    fwd_op(time=nt - 2, dt=model.critical_dt)

    rev_op(dt=model.critical_dt, time=nt-16)

    return grad.data
Example #23
zind = Scalar(name='zind', dtype=np.int32)
xb_size = Scalar(name='xb_size', dtype=np.int32)
yb_size = Scalar(name='yb_size', dtype=np.int32)
x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)
y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)

eq0 = Eq(sp_zi.symbolic_max,
         nnz_sp_source_mask[x, y] - 1,
         implicit_dims=(time, x, y))
eq1 = Eq(zind, sp_source_mask[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))

myexpr = source_mask[x, y, zind] * save_src[time, source_id[x, y, zind]]

eq2 = Inc(usol.forward[t + 1, x, y, zind],
          myexpr,
          implicit_dims=(time, x, y, sp_zi))

pde_2 = model.m * usol.dt2 - usol.laplace + model.damp * usol.dt
stencil_2 = Eq(usol.forward, solve(pde_2, usol.forward))

block_sizes = Function(name='block_sizes',
                       shape=(4, ),
                       dimensions=(b_dim, ),
                       space_order=0,
                       dtype=np.int32)

# import pdb; pdb.set_trace()
block_sizes.data[:] = args.bsizes

# import pdb; pdb.set_trace()
Example #24
    def forward(self,
                src=None,
                rec=None,
                u=None,
                v=None,
                vp=None,
                epsilon=None,
                delta=None,
                theta=None,
                phi=None,
                save=False,
                kernel='centered',
                **kwargs):
        """
        Forward modelling function that creates the necessary
        data objects for running a forward modelling operator.

        Parameters
        ----------
        src : SparseTimeFunction or array_like, optional
            Time series data for the injected source term.
        rec : SparseTimeFunction or array_like, optional
            The interpolated receiver data.
        u : TimeFunction, optional
            The computed wavefield first component.
        v : TimeFunction, optional
            The computed wavefield second component.
        vp : Function or float, optional
            The time-constant velocity.
        epsilon : Function or float, optional
            The time-constant first Thomsen parameter.
        delta : Function or float, optional
            The time-constant second Thomsen parameter.
        theta : Function or float, optional
            The time-constant Dip angle (radians).
        phi : Function or float, optional
            The time-constant Azimuth angle (radians).
        save : bool, optional
            Whether or not to save the entire (unrolled) wavefield.
        kernel : str, optional
            Type of discretization, centered or shifted.

        Returns
        -------
        Receiver, wavefield and performance summary.
        """
        if kernel == 'staggered':
            time_order = 1
            dims = self.model.space_dimensions
            stagg_u = (-dims[-1])
            stagg_v = (-dims[0],
                       -dims[1]) if self.model.grid.dim == 3 else (-dims[0])
        else:
            time_order = 2
            stagg_u = stagg_v = None
        # Source term is read-only, so re-use the default
        src = src or self.geometry.src
        # Create a new receiver object to store the result
        rec = rec or Receiver(name='rec',
                              grid=self.model.grid,
                              time_range=self.geometry.time_axis,
                              coordinates=self.geometry.rec_positions)
        # Create the forward wavefield if not provided

        if u is None:
            u = TimeFunction(name='u',
                             grid=self.model.grid,
                             staggered=stagg_u,
                             save=self.geometry.nt if save else None,
                             time_order=time_order,
                             space_order=self.space_order)
        # Create the second wavefield component if not provided
        if v is None:
            v = TimeFunction(name='v',
                             grid=self.model.grid,
                             staggered=stagg_v,
                             save=self.geometry.nt if save else None,
                             time_order=time_order,
                             space_order=self.space_order)

        print("Initial Norm u", norm(u))
        print("Initial Norm v", norm(v))

        if kernel == 'staggered':
            vx, vz, vy = particle_velocity_fields(self.model, self.space_order)
            kwargs["vx"] = vx
            kwargs["vz"] = vz
            if vy is not None:
                kwargs["vy"] = vy

        # Pick vp and Thomsen parameters from model unless explicitly provided
        kwargs.update(
            self.model.physical_params(vp=vp,
                                       epsilon=epsilon,
                                       delta=delta,
                                       theta=theta,
                                       phi=phi))
        if self.model.dim < 3:
            kwargs.pop('phi', None)
        # Execute operator and return wavefield and receiver data

        op = self.op_fwd(kernel, save)
        print(kwargs)
        summary = op.apply(src=src,
                           u=u,
                           v=v,
                           dt=kwargs.pop('dt', self.dt),
                           **kwargs)

        regnormu = norm(u)
        regnormv = norm(v)
        print("Norm u:", regnormu)
        print("Norm v:", regnormv)

        if 0:
            cmap = plt.cm.get_cmap("viridis")
            values = u.data[0, :, :, :]
            vistagrid = pv.UniformGrid()
            vistagrid.dimensions = np.array(values.shape) + 1
            vistagrid.spacing = (1, 1, 1)
            vistagrid.origin = (0, 0, 0)  # The bottom left corner of the data set
            vistagrid.cell_arrays["values"] = values.flatten(order="F")
            vistaslices = vistagrid.slice_orthogonal()
            vistagrid.plot(show_edges=True)
            vistaslices.plot(cmap=cmap)

        print("=========================================")

        s_u = TimeFunction(name='s_u',
                           grid=self.model.grid,
                           space_order=self.space_order,
                           time_order=1)
        s_v = TimeFunction(name='s_v',
                           grid=self.model.grid,
                           space_order=self.space_order,
                           time_order=1)

        src_u = src.inject(field=s_u.forward,
                           expr=src * self.model.grid.time_dim.spacing**2 /
                           self.model.m)
        src_v = src.inject(field=s_v.forward,
                           expr=src * self.model.grid.time_dim.spacing**2 /
                           self.model.m)

        op_f = Operator([src_u, src_v])
        op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))

        print("Norm s_u", norm(s_u))
        print("Norm s_v", norm(s_v))

        # Get the nonzero indices
        nzinds = np.nonzero(s_u.data[0])  # nzinds is a tuple
        assert len(nzinds) == len(self.model.grid.shape)
        shape = self.model.grid.shape
        x, y, z = self.model.grid.dimensions
        time = self.model.grid.time_dim
        t = self.model.grid.stepping_dim

        source_mask = Function(name='source_mask',
                               shape=self.model.grid.shape,
                               dimensions=(x, y, z),
                               space_order=0,
                               dtype=np.int32)
        source_id = Function(name='source_id',
                             shape=shape,
                             dimensions=(x, y, z),
                             space_order=0,
                             dtype=np.int32)
        print("source_id data indexes start from 0 now !!!")

        # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))
        source_id.data[nzinds[0], nzinds[1],
                       nzinds[2]] = tuple(np.arange(len(nzinds[0])))

        source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1
        # plot3d(source_mask.data, model)
        # import pdb; pdb.set_trace()

        print("Number of unique affected points is: %d", len(nzinds[0]))

        # Assert that first and last index are as expected
        assert (source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)
        assert (source_id.data[nzinds[0][-1], nzinds[1][-1],
                               nzinds[2][-1]] == len(nzinds[0]) - 1)
        assert (source_id.data[nzinds[0][len(nzinds[0]) - 1],
                               nzinds[1][len(nzinds[0]) - 1],
                               nzinds[2][len(nzinds[0]) -
                                         1]] == len(nzinds[0]) - 1)

        assert (np.all(np.nonzero(source_id.data)) == np.all(
            np.nonzero(source_mask.data)))
        assert (np.all(np.nonzero(source_id.data)) == np.all(
            np.nonzero(s_u.data[0])))

        print("-At this point source_mask and source_id have been populated correctly-")

        nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])

        nnz_sp_source_mask = Function(name='nnz_sp_source_mask',
                                      shape=(list(nnz_shape)),
                                      dimensions=(x, y),
                                      space_order=0,
                                      dtype=np.int32)

        nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)
        inds = np.where(source_mask.data == 1.)
        print("Grid - source positions:", inds)
        maxz = len(np.unique(inds[-1]))
        # Change only 3rd dim
        sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1],
                        maxz)

        assert (len(
            nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions) -
                                               1))

        # Note : sparse_source_id is not needed as long as sparse info is kept in mask
        # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]

        id_dim = Dimension(name='id_dim')
        b_dim = Dimension(name='b_dim')

        save_src_u = TimeFunction(name='save_src_u',
                                  shape=(src.shape[0], nzinds[1].shape[0]),
                                  dimensions=(src.dimensions[0], id_dim))
        save_src_v = TimeFunction(name='save_src_v',
                                  shape=(src.shape[0], nzinds[1].shape[0]),
                                  dimensions=(src.dimensions[0], id_dim))

        save_src_u_term = src.inject(
            field=save_src_u[src.dimensions[0], source_id],
            expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)
        save_src_v_term = src.inject(
            field=save_src_v[src.dimensions[0], source_id],
            expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)

        print("Injecting to empty grids")
        op1 = Operator([save_src_u_term, save_src_v_term])
        op1.apply(src=src, dt=kwargs.pop('dt', self.dt))
        print("Injecting to empty grids finished")
        sp_zi = Dimension(name='sp_zi')

        sp_source_mask = Function(name='sp_source_mask',
                                  shape=(list(sparse_shape)),
                                  dimensions=(x, y, sp_zi),
                                  space_order=0,
                                  dtype=np.int32)

        # Now holds IDs
        sp_source_mask.data[inds[0], inds[1], :] = tuple(
            inds[-1][:len(np.unique(inds[-1]))])

        assert (np.count_nonzero(sp_source_mask.data) == len(nzinds[0]))
        assert (len(sp_source_mask.dimensions) == 3)

        # import pdb; pdb.set_trace()         .

        zind = Scalar(name='zind', dtype=np.int32)
        xb_size = Scalar(name='xb_size', dtype=np.int32)
        yb_size = Scalar(name='yb_size', dtype=np.int32)
        x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)
        y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)

        block_sizes = Function(name='block_sizes',
                               shape=(4, ),
                               dimensions=(b_dim, ),
                               space_order=0,
                               dtype=np.int32)

        bsizes = (8, 8, 32, 32)
        block_sizes.data[:] = bsizes

        # eqxb = Eq(xb_size, block_sizes[0])
        # eqyb = Eq(yb_size, block_sizes[1])
        # eqxb2 = Eq(x0_blk0_size, block_sizes[2])
        # eqyb2 = Eq(y0_blk0_size, block_sizes[3])

        eq0 = Eq(sp_zi.symbolic_max,
                 nnz_sp_source_mask[x, y] - 1,
                 implicit_dims=(time, x, y))
        # eq1 = Eq(zind, sp_source_mask[x, sp_zi], implicit_dims=(time, x, sp_zi))
        eq1 = Eq(zind,
                 sp_source_mask[x, y, sp_zi],
                 implicit_dims=(time, x, y, sp_zi))

        inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]
        inj_v = source_mask[x, y, zind] * save_src_v[time, source_id[x, y, zind]]

        eq_u = Inc(u.forward[t + 1, x, y, zind],
                   inj_u,
                   implicit_dims=(time, x, y, sp_zi))
        eq_v = Inc(v.forward[t + 1, x, y, zind],
                   inj_v,
                   implicit_dims=(time, x, y, sp_zi))

        # The additional time-tiling equations
        # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)

        performance_map = np.array([[0, 0, 0, 0, 0]])

        bxstart = 4
        bxend = 17
        bystart = 4
        byend = 17
        bstep = 16

        txstart = 8
        txend = 9
        tystart = 8
        tyend = 9

        tstep = 16
        # Temporal autotuning
        for tx in range(txstart, txend, tstep):
            # import pdb; pdb.set_trace()
            for ty in range(tystart, tyend, tstep):
                for bx in range(bxstart, bxend, bstep):
                    for by in range(bystart, byend, bstep):

                        block_sizes.data[:] = [tx, ty, bx, by]

                        eqxb = Eq(xb_size, block_sizes[0])
                        eqyb = Eq(yb_size, block_sizes[1])
                        eqxb2 = Eq(x0_blk0_size, block_sizes[2])
                        eqyb2 = Eq(y0_blk0_size, block_sizes[3])

                        u.data[:] = 0
                        v.data[:] = 0
                        print("-----")
                        tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u,
                                 eq_v)

                        op_tt = self.op_fwd(kernel, save, tteqs)
                        summary_tt = op_tt.apply(u=u,
                                                 v=v,
                                                 dt=kwargs.pop('dt', self.dt),
                                                 **kwargs)
                        norm_tt_u = norm(u)
                        norm_tt_v = norm(v)
                        print("Norm u:", regnormu)
                        print("Norm v:", regnormv)
                        print("Norm(tt_u):", norm_tt_u)
                        print("Norm(tt_v):", norm_tt_v)

                        print(
                            "===Temporal blocking======================================"
                        )

                        performance_map = np.append(performance_map, [[
                            tx, ty, bx, by,
                            summary_tt.globals['fdlike'].gflopss
                        ]], 0)

                print(performance_map)
                # tids = np.unique(performance_map[:, 0])

                #for tid in tids:
                bids = np.where((performance_map[:, 0] == tx)
                                & (performance_map[:, 1] == ty))
                bx_data = np.unique(performance_map[bids, 2])
                by_data = np.unique(performance_map[bids, 3])
                gptss_data = performance_map[bids, 4]
                gptss_data = gptss_data.reshape(len(bx_data), len(by_data))

                fig, ax = plt.subplots()
                im = ax.imshow(gptss_data)
                pause(2)

                # We want to show all ticks...
                ax.set_xticks(np.arange(len(bx_data)))
                ax.set_yticks(np.arange(len(by_data)))
                # ... and label them with the respective list entries
                ax.set_xticklabels(bx_data)
                ax.set_yticklabels(by_data)

                ax.set_title(
                    "Gpts/s for fixed tile size. (Sweeping block sizes)")
                fig.tight_layout()

                fig.colorbar(im, ax=ax)
                # ax = sns.heatmap(gptss_data, linewidth=0.5)
                plt.savefig(
                    str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) +
                    ".pdf")

        if 0:
            cmap = plt.cm.get_cmap("viridis")
            values = u.data[0, :, :, :]
            vistagrid = pv.UniformGrid()
            vistagrid.dimensions = np.array(values.shape) + 1
            vistagrid.spacing = (1, 1, 1)
            vistagrid.origin = (0, 0, 0)  # The bottom left corner of the data set
            vistagrid.cell_arrays["values"] = values.flatten(order="F")
            vistaslices = vistagrid.slice_orthogonal()
            vistagrid.plot(show_edges=True)
            vistaslices.plot(cmap=cmap)

        return rec, u, v, summary
Example #25
def chain_contractions(A, B, C, D, E, F, optimize):
    """``AB + AC = D, DE = F``."""
    op = Operator([Inc(D, A * B + A * C), Inc(F, D * E)], dle=optimize)
    op.apply()
    info('Executed `AB + AC = D, DE = F`')
Example #26
def mat_mat_sum(A, B, C, D, optimize):
    """``AB + AC = D``."""
    op = Operator(Inc(D, A * B + A * C), dle=optimize)
    op.apply()
    info('Executed `AB + AC = D`')
Example #27
def mat_mat(A, B, C, optimize):
    """``AB = C``."""
    op = Operator(Inc(C, A * B), dle=optimize)
    op.apply()
    info('Executed `AB = C`')
Example #28
def transpose_mat_vec(A, x, b, optimize):
    """``A -> A^T, A^Tx = b``."""
    i, j = A.indices
    op = Operator([Inc(b, A[j, i] * x)], dle=optimize)
    op.apply()
    info('Executed `A^Tx = b`')
Example #29
def mat_vec(A, x, b, optimize):
    """``Ax = b``."""
    op = Operator(Inc(b, A * x), dle=optimize)
    op.apply()
    info('Executed `Ax = b`')
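# A self-contained, hypothetical driver for the BLAS-like kernels above,
# assuming plain Functions defined over free Dimensions (no Grid required):
import numpy as np
from devito import Dimension, Function, Operator, Inc

i, j = Dimension(name='i'), Dimension(name='j')
A = Function(name='A', shape=(4, 4), dimensions=(i, j), dtype=np.float32)
x = Function(name='x', shape=(4,), dimensions=(j,), dtype=np.float32)
b = Function(name='b', shape=(4,), dimensions=(i,), dtype=np.float32)
A.data[:] = 1.
x.data[:] = 2.
Operator(Inc(b, A * x))()   # b[i] += sum_j A[i, j] * x[j]
assert np.all(b.data == 8.)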
Example #30
def fwi_gradient(vp_in):
    # AUTO
    grad = Function(name="grad", grid=model.grid)
    residual = Receiver(name='rec',
                        grid=model.grid,
                        time_range=geometry.time_axis,
                        coordinates=geometry.rec_positions)
    objective = 0.
    u0 = TimeFunction(name='u',
                      grid=model.grid,
                      time_order=2,
                      space_order=4,
                      save=geometry.nt)

    # MANUAL
    grad_manual = Function(name="grad", grid=model.grid)
    residual_man = Receiver(name='rec',
                            grid=model.grid,
                            time_range=time_axis,
                            coordinates=rec_coordinates)
    objective_manual = 0.
    u0_man = TimeFunction(name='u',
                          grid=model.grid,
                          time_order=2,
                          space_order=4,
                          save=nt)
    for i in range(9):
        # AUTO
        clear_cache()
        geometry.src_positions[0, :] = source_locations[i, :]
        true_d, _, _ = solver.forward(vp=model.vp)
        u0.data.fill(0.)
        smooth_d, _, _ = solver.forward(vp=vp_in, save=True, u=u0)
        residual.data[:] = smooth_d.data[:] - true_d.data[:]
        objective += .5 * np.linalg.norm(residual.data.flatten())**2
        solver.gradient(rec=residual, u=u0, vp=vp_in, grad=grad)

        # MANUAL
        # source
        src_true = RickerSource(name='src',
                                grid=model.grid,
                                time_range=time_axis,
                                coordinates=source_locations[i, :],
                                npoint=1,
                                f0=f0)
        src_term = src_true.inject(
            field=u.forward,
            expr=src_true * model.grid.stepping_dim.spacing**2 / model.m)

        # receiver
        rec_true = Receiver(name='rec',
                            grid=model.grid,
                            time_range=time_axis,
                            coordinates=rec_coordinates,
                            npoint=nreceivers)
        rec_term = rec_true.interpolate(expr=u)

        # operator
        op_fwd = Operator(eqn + src_term + rec_term,
                          subs=model.spacing_map,
                          name='Forward')
        op_fwd.apply(src=src_true,
                     rec=rec_true,
                     u=u0_man,
                     vp=model.vp,
                     dt=model.critical_dt)

        u0_man.data.fill(0.)
        rec_smooth = Receiver(name='rec',
                              grid=model.grid,
                              time_range=time_axis,
                              coordinates=rec_coordinates,
                              npoint=nreceivers)
        op_fwd.apply(src=src_true,
                     rec=rec_smooth,
                     u=u0_man,
                     vp=vp_in,
                     dt=model.critical_dt)

        # back-receiver
        rec_back = Receiver(name='rec',
                            grid=model.grid,
                            time_range=time_axis,
                            coordinates=rec_coordinates,
                            npoint=nreceivers)
        rec_back_term = rec_back.inject(
            field=v.backward,
            expr=rec_back * model.grid.stepping_dim.spacing**2 / model.m)

        # gradient
        gradient_update = Inc(grad_manual, -u.dt2 * v)
        op_grad = Operator(eqn_back + rec_back_term + [gradient_update],
                           subs=model.spacing_map,
                           name='Gradient')
        residual_man.data[:] = rec_smooth.data[:] - rec_true.data[:]
        objective_manual += .5 * np.linalg.norm(residual_man.data.flatten())**2
        op_grad.apply(rec=residual_man,
                      u=u0_man,
                      vp=vp_in,
                      dt=model.critical_dt,
                      grad=grad_manual)

        # Sanity check -> expect 0!
        # plot_shotrecord(true_d.data[:] - rec_true.data[:], model, t0, tn)
        # plot_shotrecord(smooth_d.data[:] - rec_smooth.data[:], model, t0, tn)
    return objective, -grad.data, objective_manual, -grad_manual.data