Example #1
def test_geom(shape):
    vp = np.ones(shape)
    o = tuple([0] * len(shape))
    d = tuple([10] * len(shape))
    model = Model(o, d, shape, 4, vp, nbl=20, dt=1)
    assert model.critical_dt == 1

    nrec = 31
    nsrc = 4
    rec_coordinates = np.ones((nrec, len(shape)))
    src_coordinates = np.ones((nsrc, len(shape)))
    geometry = AcquisitionGeometry(model,
                                   rec_coordinates,
                                   src_coordinates,
                                   t0=0.0,
                                   tn=250)
    assert geometry.grid == model.grid
    assert geometry.nrec == nrec
    assert geometry.nsrc == nsrc
    assert geometry.src_type is None

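    # t0=0, tn=250 and dt=1 (the model's critical_dt) give a 251-sample time axis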
    assert geometry.rec.shape == (251, nrec)
    assert norm(geometry.rec) == 0
    assert geometry.src.shape == (251, nsrc)
    assert norm(geometry.new_src(src_type=None)) == 0
    assert norm(geometry.src) == 0

    rec2 = geometry.rec.resample(num=501)
    assert rec2.shape == (501, nrec)
    assert rec2.grid == model.grid

    assert geometry.new_rec(name="bonjour").name == "bonjour"
    assert geometry.new_src(name="bonjour").name == "bonjour"
Example #2
def test_default_geom(shape):
    vp = np.ones(shape)
    o = tuple([0] * len(shape))
    d = tuple([10] * len(shape))
    model = Model(o, d, shape, 4, vp, nbl=20, dt=1)
    assert model.critical_dt == 1

    geometry = setup_geometry(model, 250)
    nrec = shape[0] * (shape[1] if len(shape) > 2 else 1)
    assert geometry.grid == model.grid
    assert geometry.nrec == nrec
    assert geometry.nsrc == 1
    assert geometry.src_type == "Ricker"

    assert geometry.rec.shape == (251, nrec)
    assert norm(geometry.rec) == 0
    assert geometry.src.shape == (251, 1)
    assert norm(geometry.new_src(src_type=None)) == 0

    rec2 = geometry.rec.resample(num=501)
    assert rec2.shape == (501, nrec)
    assert rec2.grid == model.grid

    assert geometry.new_rec(name="bonjour").name == "bonjour"
    assert geometry.new_src(name="bonjour").name == "bonjour"
Example #3
    def test_adjoint_F(self, shape, kernel, space_order, nbpml, save,
                       Eu, Erec, Ev, Esrca):
        """
        Unlike `test_adjoint_F` in test_adjoint.py, here we explicitly check the norms
        of all Operator-evaluated Functions. The numbers we check against are derived
        "manually" from sequential runs of test_adjoint::test_adjoint_F
        """
        tn = 500.  # Final time
        nrec = 130  # Number of receivers

        # Create solver from preset
        solver = acoustic_setup(shape=shape, spacing=[15. for _ in shape], kernel=kernel,
                                nbpml=nbpml, tn=tn, space_order=space_order, nrec=nrec,
                                preset='layers-isotropic', dtype=np.float64)
        # Run forward operator
        rec, u, _ = solver.forward(save=save)

        assert np.isclose(norm(u), Eu, rtol=Eu*1.e-8)
        assert np.isclose(norm(rec), Erec, rtol=Erec*1.e-8)

        # Run adjoint operator
        srca, v, _ = solver.adjoint(rec=rec)

        assert np.isclose(norm(v), Ev, rtol=Ev*1.e-8)
        assert np.isclose(norm(srca), Esrca, rtol=Esrca*1.e-8)

        # Adjoint test: Verify <Ax,y> matches  <x, A^Ty> closely
        term1 = inner(srca, solver.geometry.src)
        term2 = norm(rec)**2
        assert np.isclose((term1 - term2)/term1, 0., rtol=1.e-10)
Example #4
    def test_skewed_bounds(self, expr, expected, norm_u, norm_v):
        """Tests code generation on skewed indices."""
        grid = Grid(shape=(16, 16, 16))
        x, y, z = grid.dimensions
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)  # noqa
        v = TimeFunction(name='v', grid=grid)  # noqa
        eqn = eval(expr)
        # List comprehension would need explicit locals/globals mappings to eval
        op = Operator(eqn, opt=('blocking', {'skewing': True}))
        op.apply(time_M=5)
        iters = FindNodes(Iteration).visit(op)
        time_iter = [i for i in iters if i.dim.is_Time]
        assert len(time_iter) == 1

        bns, _ = assert_blocking(op, {'x0_blk0'})

        iters = FindNodes(Iteration).visit(bns['x0_blk0'])
        assert len(iters) == 5
        assert iters[0].dim.parent is x
        assert iters[1].dim.parent is y
        assert iters[4].dim is z
        assert iters[2].dim.parent is iters[0].dim
        assert iters[3].dim.parent is iters[1].dim

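        # Under skewing, the outer x/y block loops are shifted by the time index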
        assert iters[0].symbolic_min == (iters[0].dim.parent.symbolic_min +
                                         time)
        assert iters[0].symbolic_max == (iters[0].dim.parent.symbolic_max +
                                         time)
        assert iters[1].symbolic_min == (iters[1].dim.parent.symbolic_min +
                                         time)
        assert iters[1].symbolic_max == (iters[1].dim.parent.symbolic_max +
                                         time)

        assert iters[2].symbolic_min == iters[2].dim.symbolic_min
        assert iters[2].symbolic_max == MIN(
            iters[0].dim + iters[0].dim.symbolic_incr - 1,
            iters[0].dim.symbolic_max + time)
        assert iters[3].symbolic_min == iters[3].dim.symbolic_min
        assert iters[3].symbolic_max == MIN(
            iters[1].dim + iters[1].dim.symbolic_incr - 1,
            iters[1].dim.symbolic_max + time)
        assert iters[4].symbolic_min == (iters[4].dim.symbolic_min)
        assert iters[4].symbolic_max == (iters[4].dim.symbolic_max)
        skewed = [i.expr for i in FindNodes(Expression).visit(bns['x0_blk0'])]
        assert str(skewed[0]).replace(' ', '') == expected
        assert np.isclose(norm(u), norm_u, rtol=1e-5)
        assert np.isclose(norm(v), norm_v, rtol=1e-5)

        u.data[:] = 0
        v.data[:] = 0
        op2 = Operator(eqn, opt='advanced')
        op2.apply(time_M=5)
        assert np.isclose(norm(u), norm_u, rtol=1e-5)
        assert np.isclose(norm(v), norm_v, rtol=1e-5)
Example #5
def run(shape=(50, 50, 50),
        spacing=(20.0, 20.0, 20.0),
        tn=1000.0,
        space_order=4,
        kernel='OT2',
        nbl=40,
        full_run=False,
        fs=False,
        autotune=False,
        preset='layers-isotropic',
        checkpointing=False,
        **kwargs):

    solver = acoustic_setup(shape=shape,
                            spacing=spacing,
                            nbl=nbl,
                            tn=tn,
                            space_order=space_order,
                            kernel=kernel,
                            fs=fs,
                            preset=preset,
                            **kwargs)

    info("Applying Forward")
    # Whether or not we save the whole time history. We only need the full wavefield
    # with 'save=True' if we compute the gradient without checkpointing; if we use
    # checkpointing, PyRevolve will take care of the time history
    save = full_run and not checkpointing
    # Run the forward operator
    rec, u, summary = solver.forward(save=save, autotune=autotune)
    # print(norm(rec))
    print(norm(u))

    if preset == 'constant':
        # With a new vp as a Constant
        v0 = Constant(name="v", value=2.0, dtype=np.float32)
        solver.forward(save=save, vp=v0)
        # With a new vp as a scalar value
        solver.forward(save=save, vp=2.0)

    if not full_run:
        return summary.gflopss, summary.oi, summary.timings, [rec, u.data]

    # Smooth velocity
    initial_vp = Function(name='v0',
                          grid=solver.model.grid,
                          space_order=space_order)
    smooth(initial_vp, solver.model.vp)
    dm = np.float32(initial_vp.data**(-2) - solver.model.vp.data**(-2))

    info("Applying Adjoint")
    solver.adjoint(rec, autotune=autotune)
    info("Applying Born")
    solver.jacobian(dm, autotune=autotune)
    info("Applying Gradient")
    solver.jacobian_adjoint(rec,
                            u,
                            autotune=autotune,
                            checkpointing=checkpointing)
    return summary.gflopss, summary.oi, summary.timings, [rec, u.data]
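
A typical invocation of this driver could look as follows (hypothetical problem size; any extra keyword understood by acoustic_setup can be forwarded through **kwargs):

gflopss, oi, timings, [rec, u_data] = run(shape=(60, 60, 60), tn=500.0,
                                          space_order=4, full_run=True)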
Example #6
def fwi_gradient(vp_in):
    # Create symbols to hold the gradient
    grad = Function(name="grad", grid=model.grid)
    objective = 0.
    for i in range(nshots):
        # Create placeholders for the data residual and data
        residual = Receiver(name='residual',
                            grid=model.grid,
                            time_range=geometry.time_axis,
                            coordinates=geometry.rec_positions)
        d_obs = Receiver(name='d_obs',
                         grid=model.grid,
                         time_range=geometry.time_axis,
                         coordinates=geometry.rec_positions)
        d_syn = Receiver(name='d_syn',
                         grid=model.grid,
                         time_range=geometry.time_axis,
                         coordinates=geometry.rec_positions)
        # Update source location
        solver.geometry.src_positions[0, :] = source_locations[i, :]

        # Generate synthetic data from true model
        solver.forward(vp=model.vp, rec=d_obs)

        # Compute smooth data and full forward wavefield u0
        _, u0, _ = solver.forward(vp=vp_in, save=True, rec=d_syn)

        # Compute gradient from data residual and update objective function
        residual = compute_residual(residual, d_obs, d_syn)

        objective += .5 * norm(residual)**2
        solver.gradient(rec=residual, u=u0, vp=vp_in, grad=grad)

    return objective, grad
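
A minimal (hypothetical) usage sketch of this function, assuming a smooth starting model model0 has been built next to the true model used above:

phi, direction = fwi_gradient(model0.vp)
print("objective: %f, gradient norm: %f" % (phi, norm(direction)))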
Example #7
    def test_adjoint_F(self, mkey, shape, kernel, space_order):
        """
        Adjoint test for the forward modeling operator.
        The forward modeling operator F generates a shot record (measurements)
        from a source while the adjoint of F generates measurements at the source
        location from data. This test uses the conventional dot test:
        <Fx, y> = <x, F^T y>
        """
        tn = 500.  # Final time

        # Create solver from preset
        solver = acoustic_setup(shape=shape, spacing=[15. for _ in shape], kernel=kernel,
                                nbl=10, tn=tn, space_order=space_order,
                                **(presets[mkey]), dtype=np.float64)

        # Create adjoint receiver symbol
        srca = Receiver(name='srca', grid=solver.model.grid,
                        time_range=solver.geometry.time_axis,
                        coordinates=solver.geometry.src_positions)

        # Run forward and adjoint operators
        rec, _, _ = solver.forward(save=False)
        solver.adjoint(rec=rec, srca=srca)

        # Adjoint test: Verify <Ax,y> matches  <x, A^Ty> closely
        term1 = np.dot(srca.data.reshape(-1), solver.geometry.src.data)
        term2 = norm(rec) ** 2
        info('<Ax,y>: %f, <x, A^Ty>: %f, difference: %4.4e, ratio: %f'
             % (term1, term2, (term1 - term2)/term1, term1 / term2))
        assert np.isclose((term1 - term2)/term1, 0., atol=1.e-12)
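
The dot test used above holds for any linear operator; a minimal, self-contained NumPy sketch of the same identity (purely illustrative, independent of the solver):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((40, 25))   # stand-in for the linear operator F
x = rng.standard_normal(25)
y = rng.standard_normal(40)
term1 = np.dot(A @ x, y)            # <Fx, y>
term2 = np.dot(x, A.T @ y)          # <x, F^T y>
assert np.isclose(term1, term2, rtol=1e-10, atol=1e-10)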
Example #8
def test_elastic_stability(shape):
    spacing = tuple([20] * len(shape))
    _, _, _, [rec1, rec2, v, tau] = run(shape=shape,
                                        spacing=spacing,
                                        tn=20000.0,
                                        nbl=0)
    assert np.isfinite(norm(rec1))
Example #9
def test_isoacoustic_stability(shape, k):
    spacing = tuple([20] * len(shape))
    _, _, _, [rec, _] = run(shape=shape,
                            spacing=spacing,
                            tn=20000.0,
                            nbl=0,
                            kernel=k)
    assert np.isfinite(norm(rec))
Example #10
def test_tti_stability(shape, kernel):
    spacing = tuple([20] * len(shape))
    _, _, _, [rec, _, _] = run(shape=shape,
                               spacing=spacing,
                               kernel=kernel,
                               tn=16000.0,
                               nbl=0)
    assert np.isfinite(norm(rec))
Example #11
def test_viscoelastic_stability(ndim):
    shape = tuple([11] * ndim)
    spacing = tuple([20] * ndim)
    _, _, _, [rec1, rec2, v, tau] = run(shape=shape,
                                        spacing=spacing,
                                        tn=20000.0,
                                        nbl=0)
    assert np.isfinite(norm(rec1))
Example #12
def test_tti_stability(kernel, ndim):
    shape = tuple([11] * ndim)
    spacing = tuple([20] * ndim)
    _, _, _, [rec, _, _] = run(shape=shape,
                               spacing=spacing,
                               kernel=kernel,
                               tn=16000.0,
                               nbl=0)
    assert np.isfinite(norm(rec))
Example #13
def test_viscoacoustic_stability(ndim, kernel):
    shape = tuple([11] * ndim)
    spacing = tuple([20] * ndim)
    _, _, _, [rec] = run(shape=shape,
                         spacing=spacing,
                         tn=20000.0,
                         nbl=0,
                         kernel=kernel)
    assert np.isfinite(norm(rec))
Example #14
    def iso_acoustic(self, opt):
        shape = (101, 101)
        extent = (1000, 1000)
        origin = (0., 0.)

        v = np.empty(shape, dtype=np.float32)
        v[:, :51] = 1.5
        v[:, 51:] = 2.5

        grid = Grid(shape=shape, extent=extent, origin=origin)

        t0 = 0.
        tn = 1000.
        dt = 1.6
        time_range = TimeAxis(start=t0, stop=tn, step=dt)

        f0 = 0.010
        src = RickerSource(name='src',
                           grid=grid,
                           f0=f0,
                           npoint=1,
                           time_range=time_range)

        domain_size = np.array(extent)

        src.coordinates.data[0, :] = domain_size * .5
        src.coordinates.data[0, -1] = 20.

        rec = Receiver(name='rec',
                       grid=grid,
                       npoint=101,
                       time_range=time_range)
        rec.coordinates.data[:, 0] = np.linspace(0, domain_size[0], num=101)
        rec.coordinates.data[:, 1] = 20.

        u = TimeFunction(name="u", grid=grid, time_order=2, space_order=2)
        m = Function(name='m', grid=grid)
        m.data[:] = 1. / (v * v)

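        # Constant-density acoustic wave equation: m * u_tt = laplacian(u) (no damping term in this example)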
        pde = m * u.dt2 - u.laplace
        stencil = Eq(u.forward, solve(pde, u.forward))

        src_term = src.inject(field=u.forward, expr=src * dt**2 / m)
        rec_term = rec.interpolate(expr=u.forward)

        op = Operator([stencil] + src_term + rec_term,
                      opt=opt,
                      language='openmp')

        # Make sure we've indeed generated OpenMP offloading code
        assert 'omp target' in str(op)

        op(time=time_range.num - 1, dt=dt)

        assert np.isclose(norm(rec), 490.55, atol=1e-2, rtol=0)
Example #15
def run(problem, **kwargs):
    """
    A single run with a specific set of performance parameters.
    """
    setup = model_type[problem]['setup']
    options = {}

    time_order = kwargs.pop('time_order')[0]
    space_order = kwargs.pop('space_order')[0]
    autotune = kwargs.pop('autotune')
    options['autotune'] = autotune
    block_shapes = as_tuple(kwargs.pop('block_shape'))
    operator = kwargs.pop('operator', 'forward')

    # Should a specific block-shape be used? Useful if one wants to skip
    # the autotuning pass as a good block-shape is already known
    # Note: the following piece of code is horribly *hacky*, but it works for now
    for i, block_shape in enumerate(block_shapes):
        for n, level in enumerate(block_shape):
            for d, s in zip(['x', 'y', 'z'], level):
                options['%s%d_blk%d_size' % (d, i, n)] = s

    solver = setup(space_order=space_order, time_order=time_order, **kwargs)
    retval = run_op(solver, operator, **options)

    try:
        rank = MPI.COMM_WORLD.rank
    except AttributeError:
        # MPI not available
        rank = 0

    dumpfile = kwargs.pop('dump_summary')
    if dumpfile:
        if configuration['profiling'] != 'advanced':
            raise RuntimeError(
                "Must set DEVITO_PROFILING=advanced (or, alternatively, "
                "DEVITO_LOGGING=PERF) with --dump-summary")
        if rank == 0:
            with open(dumpfile, 'w') as f:
                summary = retval[-1]
                assert isinstance(summary, PerformanceSummary)
                f.write(str(summary.globals['fdlike']))

    dumpfile = kwargs.pop('dump_norms')
    if dumpfile:
        norms = [
            "'%s': %f" % (i.name, norm(i)) for i in retval[:-1]
            if isinstance(i, DiscreteFunction)
        ]
        if rank == 0:
            with open(dumpfile, 'w') as f:
                f.write("{%s}" % ', '.join(norms))

    return retval
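
For reference, a small standalone sketch of how the nested loop above turns a block-shape specification into Operator options (hypothetical sizes: one block shape with two nested blocking levels on a 3D grid):

block_shapes = [((32, 32, 32), (8, 8, 8))]
options = {}
for i, block_shape in enumerate(block_shapes):
    for n, level in enumerate(block_shape):
        for d, s in zip(['x', 'y', 'z'], level):
            options['%s%d_blk%d_size' % (d, i, n)] = s
# options == {'x0_blk0_size': 32, 'y0_blk0_size': 32, 'z0_blk0_size': 32,
#             'x0_blk1_size': 8, 'y0_blk1_size': 8, 'z0_blk1_size': 8}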
Example #16
    def test_adjoint_J(self, mkey, shape, kernel, space_order, setup_func):
        """
        Adjoint test for the FWI Jacobian operator.
        The Jacobian operator J generates a linearized shot record (measurements)
        from a model perturbation dm while the adjoint of J generates the FWI gradient
        from an adjoint source (usually data residual). This test uses the conventional
        dot test:
        <Jx, y> = <x, J^T y>
        """
        tn = 500.  # Final time
        nbl = 10 + space_order / 2
        spacing = tuple([10.] * len(shape))
        # Create solver from preset
        solver = setup_func(shape=shape,
                            spacing=spacing,
                            vp_bottom=2,
                            kernel=kernel,
                            nbl=nbl,
                            tn=tn,
                            space_order=space_order,
                            **(presets[mkey]),
                            dtype=np.float64)

        # Create initial model (m0) with a constant velocity throughout
        model0 = demo_model(**(presets[mkey]),
                            vp_top=1.5,
                            vp_bottom=1.5,
                            spacing=spacing,
                            space_order=space_order,
                            shape=shape,
                            nbl=nbl,
                            dtype=np.float64,
                            grid=solver.model.grid)

        # Compute initial born perturbation from m - m0
        dm = (solver.model.vp.data**(-2) - model0.vp.data**(-2))

        du = solver.jacobian(dm, model=model0)[0]

        # Compute the full background field(s) & gradient from the initial perturbation
        if setup_func is tti_setup:
            u0, v0 = solver.forward(save=True, model=model0)[1:-1]
            im, _ = solver.jacobian_adjoint(du, u0, v0, model=model0)
        else:
            u0 = solver.forward(save=True, model=model0)[1]
            im, _ = solver.jacobian_adjoint(du, u0, model=model0)

        # Adjoint test: Verify <Ax,y> matches  <x, A^Ty> closely
        term1 = np.dot(im.data.reshape(-1), dm.reshape(-1))
        term2 = norm(du)**2
        info('<x, J^Ty>: %f, <Jx,y>: %f, difference: %4.4e, ratio: %f' %
             (term1, term2, (term1 - term2) / term1, term1 / term2))
        assert np.isclose((term1 - term2) / term1, 0., atol=1.e-12)
Example #17
    def test_adjoint_J(self, shape, space_order):
        """
        Adjoint test for the FWI Jacobian operator.
        The Jacobian operator J generates a linearized shot record (measurements)
        from a model perturbation dm while the adjoint of J generates the FWI gradient
        from an adjoint source (usually data residual). This test uses the conventional
        dot test:
        <Jx, y> = <x, J^T y>
        """
        tn = 500.  # Final time
        nbl = 10 + space_order / 2
        spacing = tuple([10.] * len(shape))
        # Create solver from preset
        solver = acoustic_setup(shape=shape,
                                spacing=spacing,
                                nlayers=2,
                                vp_bottom=2,
                                nbl=nbl,
                                tn=tn,
                                space_order=space_order,
                                preset='layers-isotropic',
                                dtype=np.float64)

        # Create initial model (m0) with a constant velocity throughout
        model0 = demo_model('layers-isotropic',
                            vp_top=1.5,
                            vp_bottom=1.5,
                            spacing=spacing,
                            space_order=space_order,
                            shape=shape,
                            nbl=nbl,
                            dtype=np.float64,
                            grid=solver.model.grid)

        # Compute the full wavefield u0
        _, u0, _ = solver.forward(save=True, vp=model0.vp)

        # Compute initial born perturbation from m - m0
        dm = (solver.model.vp.data**(-2) - model0.vp.data**(-2))

        du, _, _, _ = solver.born(dm, vp=model0.vp)

        # Compute gradient from initial perturbation
        im, _ = solver.gradient(du, u0, vp=model0.vp)

        # Adjoint test: Verify <Ax,y> matches  <x, A^Ty> closely
        term1 = np.dot(im.data.reshape(-1), dm.reshape(-1))
        term2 = norm(du)**2
        info('<Jx,y>: %f, <x, J^Ty>: %f, difference: %4.4e, ratio: %f' %
             (term1, term2, (term1 - term2) / term1, term1 / term2))
        assert np.isclose((term1 - term2) / term1, 0., atol=1.e-12)
Example #18
def run(problem, **kwargs):
    """
    A single run with a specific set of performance parameters.
    """
    setup = model_type[problem]['setup']
    options = {}

    time_order = kwargs.pop('time_order')[0]
    space_order = kwargs.pop('space_order')[0]
    autotune = kwargs.pop('autotune')
    block_shapes = as_tuple(kwargs.pop('block_shape'))

    # Should a specific block-shape be used? Useful if one wants to skip
    # the autotuning pass as a good block-shape is already known
    # Note: the following piece of code is horribly *hacky*, but it works for now
    for i, block_shape in enumerate(block_shapes):
        for n, level in enumerate(block_shape):
            for d, s in zip(['x', 'y', 'z'], level):
                options['%s%d_blk%d_size' % (d, i, n)] = s

    solver = setup(space_order=space_order, time_order=time_order, **kwargs)
    retval = solver.forward(autotune=autotune, **options)

    # With MPI, only rank0 writes to disk
    try:
        rank = MPI.COMM_WORLD.rank
    except TypeError:
        # MPI not available
        rank = 0
    if rank == 0:
        dumpfile = kwargs.pop('dump_summary')
        if dumpfile:
            with open(dumpfile, 'w') as f:
                summary = retval[-1]
                assert isinstance(summary, PerformanceSummary)
                f.write(str(summary.globals['fdlike']))

        dumpfile = kwargs.pop('dump_norms')
        if dumpfile:
            norms = [
                "'%s': %f" % (i.name, norm(i)) for i in retval[:-1]
                if isinstance(i, DiscreteFunction)
            ]
            with open(dumpfile, 'w') as f:
                f.write("{%s}" % ', '.join(norms))

    return retval
Example #19
def cli_run_jit_backdoor(problem, **kwargs):
    """`click` interface for the `run_jit_backdoor` mode in `benchmark.py`."""

    # Preset shared parameters
    kwargs['space_order'] = [12]
    kwargs['time_order'] = [2]
    kwargs['nbl'] = 10
    kwargs['spacing'] = (20.0, 20.0, 20.0)

    # Preset problem-specific parameters
    if problem == 'tti':
        kwargs['shape'] = (350, 350, 350)
        kwargs['tn'] = 50  # End time of the simulation in ms
        kwargs['dse'] = 'aggressive'

        # Reference norms for the output fields
        reference = {'rec': 66.417102, 'u': 30.707737, 'v': 30.707728}
    elif problem == 'acoustic':
        kwargs['shape'] = (492, 492, 492)
        kwargs['tn'] = 100  # End time of the simulation in ms
        kwargs['dse'] = 'advanced'

        # Reference norms for the output fields
        reference = {'rec': 184.526400, 'u': 151.545837}
    else:
        assert False

    # Dummy values as they will be unused
    kwargs['block_shape'] = []
    kwargs['autotune'] = 'off'

    retval = run_jit_backdoor(problem, **kwargs)

    if retval is not None:
        for i in retval:
            if isinstance(i, DiscreteFunction):
                v = norm(i)
                info(
                    "norm(%s) = %f (expected = %f, delta = %f)" %
                    (i.name, v, reference[i.name], abs(v - reference[i.name])))

    # Record DEVITO_ environment
    env = [(k, v) for k, v in os.environ.items() if k.startswith('DEVITO_')]
    content = "{%s}" % ", ".join("'%s': '%s'" % (k, v) for k, v in env)
    with open('env.py', 'w') as f:
        f.write(content)
Example #20
def run_jit_backdoor(problem, **kwargs):
    """
    A single run using the DEVITO_JIT_BACKDOOR to test kernel customization.
    """
    configuration['develop-mode'] = False

    setup = model_type[problem]['setup']

    time_order = kwargs.pop('time_order')[0]
    space_order = kwargs.pop('space_order')[0]
    autotune = kwargs.pop('autotune')

    info("Preparing simulation...")
    solver = setup(space_order=space_order, time_order=time_order, **kwargs)

    # Generate code (but do not JIT yet)
    op = solver.op_fwd()

    # Get the filename in the JIT cache
    cfile = "%s.c" % str(op._compiler.get_jit_dir().joinpath(op._soname))

    if not os.path.exists(cfile):
        # First time we run this problem, let's generate and jit-compile code
        op.cfunction
        info("You may now edit the generated code in `%s`. "
             "Then save the file, and re-run this benchmark." % cfile)
        return

    info("Running wave propagation Operator...")

    @switchconfig(jit_backdoor=True)
    def _run_jit_backdoor():
        return run_op(solver, 'forward', autotune=autotune)

    retval = _run_jit_backdoor()

    dumpnorms = kwargs.pop('dump_norms')
    if dumpnorms:
        for i in retval[:-1]:
            if isinstance(i, DiscreteFunction):
                info("'%s': %f" % (i.name, norm(i)))

    return retval
Example #21
    def forward(self,
                src=None,
                rec=None,
                u=None,
                v=None,
                vp=None,
                epsilon=None,
                delta=None,
                theta=None,
                phi=None,
                save=False,
                kernel='centered',
                **kwargs):
        """
        Forward modelling function that creates the necessary
        data objects for running a forward modelling operator.

        Parameters
        ----------
        geometry : AcquisitionGeometry
            Geometry object that contains the source (SparseTimeFunction) and
            receivers (SparseTimeFunction) and their position.
        u : TimeFunction, optional
            The computed wavefield first component.
        v : TimeFunction, optional
            The computed wavefield second component.
        vp : Function or float, optional
            The time-constant velocity.
        epsilon : Function or float, optional
            The time-constant first Thomsen parameter.
        delta : Function or float, optional
            The time-constant second Thomsen parameter.
        theta : Function or float, optional
            The time-constant Dip angle (radians).
        phi : Function or float, optional
            The time-constant Azimuth angle (radians).
        save : bool, optional
            Whether or not to save the entire (unrolled) wavefield.
        kernel : str, optional
            Type of discretization, centered or shifted.

        Returns
        -------
        Receiver, wavefield and performance summary.
        """
        if kernel == 'staggered':
            time_order = 1
            dims = self.model.space_dimensions
            stagg_u = (-dims[-1])
            stagg_v = (-dims[0],
                       -dims[1]) if self.model.grid.dim == 3 else (-dims[0])
        else:
            time_order = 2
            stagg_u = stagg_v = None
        # Source term is read-only, so re-use the default
        src = src or self.geometry.src
        # Create a new receiver object to store the result
        rec = rec or Receiver(name='rec',
                              grid=self.model.grid,
                              time_range=self.geometry.time_axis,
                              coordinates=self.geometry.rec_positions)
        # Create the forward wavefield if not provided
        if u is None:
            u = TimeFunction(name='u',
                             grid=self.model.grid,
                             staggered=stagg_u,
                             save=self.geometry.nt if save else None,
                             time_order=time_order,
                             space_order=self.space_order)
        # Create the forward wavefield if not provided
        if v is None:
            v = TimeFunction(name='v',
                             grid=self.model.grid,
                             staggered=stagg_v,
                             save=self.geometry.nt if save else None,
                             time_order=time_order,
                             space_order=self.space_order)

        print("Initial Norm u", norm(u))
        print("Initial Norm v", norm(v))

        if kernel == 'staggered':
            vx, vz, vy = particle_velocity_fields(self.model, self.space_order)
            kwargs["vx"] = vx
            kwargs["vz"] = vz
            if vy is not None:
                kwargs["vy"] = vy

        # Pick vp and Thomsen parameters from model unless explicitly provided
        kwargs.update(
            self.model.physical_params(vp=vp,
                                       epsilon=epsilon,
                                       delta=delta,
                                       theta=theta,
                                       phi=phi))
        if self.model.dim < 3:
            kwargs.pop('phi', None)
        # Execute operator and return wavefield and receiver data

        op = self.op_fwd(kernel, save)
        print(kwargs)
        summary = op.apply(src=src,
                           u=u,
                           v=v,
                           dt=kwargs.pop('dt', self.dt),
                           **kwargs)

        regnormu = norm(u)
        regnormv = norm(v)
        print("Norm u:", regnormu)
        print("Norm v:", regnormv)

        if 0:
            cmap = plt.cm.get_cmap("viridis")
            values = u.data[0, :, :, :]
            vistagrid = pv.UniformGrid()
            vistagrid.dimensions = np.array(values.shape) + 1
            vistagrid.spacing = (1, 1, 1)
            vistagrid.origin = (0, 0, 0)  # The bottom left corner of the data set
            vistagrid.cell_arrays["values"] = values.flatten(order="F")
            vistaslices = vistagrid.slice_orthogonal()
            vistagrid.plot(show_edges=True)
            vistaslices.plot(cmap=cmap)

        print("=========================================")

        s_u = TimeFunction(name='s_u',
                           grid=self.model.grid,
                           space_order=self.space_order,
                           time_order=1)
        s_v = TimeFunction(name='s_v',
                           grid=self.model.grid,
                           space_order=self.space_order,
                           time_order=1)

        src_u = src.inject(field=s_u.forward,
                           expr=src * self.model.grid.time_dim.spacing**2 /
                           self.model.m)
        src_v = src.inject(field=s_v.forward,
                           expr=src * self.model.grid.time_dim.spacing**2 /
                           self.model.m)

        op_f = Operator([src_u, src_v])
        op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))

        print("Norm s_u", norm(s_u))
        print("Norm s_v", norm(s_v))

        # Get the nonzero indices
        nzinds = np.nonzero(s_u.data[0])  # nzinds is a tuple
        assert len(nzinds) == len(self.model.grid.shape)
        shape = self.model.grid.shape
        x, y, z = self.model.grid.dimensions
        time = self.model.grid.time_dim
        t = self.model.grid.stepping_dim

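        # source_mask flags the grid points touched by the injection; source_id numbers them consecutively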
        source_mask = Function(name='source_mask',
                               shape=self.model.grid.shape,
                               dimensions=(x, y, z),
                               space_order=0,
                               dtype=np.int32)
        source_id = Function(name='source_id',
                             shape=shape,
                             dimensions=(x, y, z),
                             space_order=0,
                             dtype=np.int32)
        print("source_id data indices start from 0")

        # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))
        source_id.data[nzinds[0], nzinds[1],
                       nzinds[2]] = tuple(np.arange(len(nzinds[0])))

        source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1
        # plot3d(source_mask.data, model)
        # import pdb; pdb.set_trace()

        print("Number of unique affected points is: %d" % len(nzinds[0]))

        # Assert that first and last index are as expected
        assert (source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)
        assert (source_id.data[nzinds[0][-1], nzinds[1][-1],
                               nzinds[2][-1]] == len(nzinds[0]) - 1)
        assert (source_id.data[nzinds[0][len(nzinds[0]) - 1],
                               nzinds[1][len(nzinds[0]) - 1],
                               nzinds[2][len(nzinds[0]) -
                                         1]] == len(nzinds[0]) - 1)

        assert (np.all(np.nonzero(source_id.data)) == np.all(
            np.nonzero(source_mask.data)))
        assert (np.all(np.nonzero(source_id.data)) == np.all(
            np.nonzero(s_u.data[0])))

        print(
            "-At this point source_mask and source_id have been populated correctly-"
        )

        nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])

        nnz_sp_source_mask = Function(name='nnz_sp_source_mask',
                                      shape=(list(nnz_shape)),
                                      dimensions=(x, y),
                                      space_order=0,
                                      dtype=np.int32)

        nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)
        inds = np.where(source_mask.data == 1.)
        print("Grid - source positions:", inds)
        maxz = len(np.unique(inds[-1]))
        # Change only 3rd dim
        sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1],
                        maxz)

        assert (len(
            nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions) -
                                               1))

        # Note : sparse_source_id is not needed as long as sparse info is kept in mask
        # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]

        id_dim = Dimension(name='id_dim')
        b_dim = Dimension(name='b_dim')

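        # Record, per time step, the injected source contribution at each affected grid point (keyed by source_id)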
        save_src_u = TimeFunction(name='save_src_u',
                                  shape=(src.shape[0], nzinds[1].shape[0]),
                                  dimensions=(src.dimensions[0], id_dim))
        save_src_v = TimeFunction(name='save_src_v',
                                  shape=(src.shape[0], nzinds[1].shape[0]),
                                  dimensions=(src.dimensions[0], id_dim))

        save_src_u_term = src.inject(
            field=save_src_u[src.dimensions[0], source_id],
            expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)
        save_src_v_term = src.inject(
            field=save_src_v[src.dimensions[0], source_id],
            expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)

        print("Injecting to empty grids")
        op1 = Operator([save_src_u_term, save_src_v_term])
        op1.apply(src=src, dt=kwargs.pop('dt', self.dt))
        print("Injecting to empty grids finished")
        sp_zi = Dimension(name='sp_zi')

        sp_source_mask = Function(name='sp_source_mask',
                                  shape=(list(sparse_shape)),
                                  dimensions=(x, y, sp_zi),
                                  space_order=0,
                                  dtype=np.int32)

        # Now holds IDs
        sp_source_mask.data[inds[0], inds[1], :] = tuple(
            inds[-1][:len(np.unique(inds[-1]))])

        assert (np.count_nonzero(sp_source_mask.data) == len(nzinds[0]))
        assert (len(sp_source_mask.dimensions) == 3)

        # import pdb; pdb.set_trace()

        zind = Scalar(name='zind', dtype=np.int32)
        xb_size = Scalar(name='xb_size', dtype=np.int32)
        yb_size = Scalar(name='yb_size', dtype=np.int32)
        x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)
        y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)

        block_sizes = Function(name='block_sizes',
                               shape=(4, ),
                               dimensions=(b_dim, ),
                               space_order=0,
                               dtype=np.int32)

        bsizes = (8, 8, 32, 32)
        block_sizes.data[:] = bsizes

        # eqxb = Eq(xb_size, block_sizes[0])
        # eqyb = Eq(yb_size, block_sizes[1])
        # eqxb2 = Eq(x0_blk0_size, block_sizes[2])
        # eqyb2 = Eq(y0_blk0_size, block_sizes[3])

        eq0 = Eq(sp_zi.symbolic_max,
                 nnz_sp_source_mask[x, y] - 1,
                 implicit_dims=(time, x, y))
        # eq1 = Eq(zind, sp_source_mask[x, sp_zi], implicit_dims=(time, x, sp_zi))
        eq1 = Eq(zind,
                 sp_source_mask[x, y, sp_zi],
                 implicit_dims=(time, x, y, sp_zi))

        inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y,
                                                                     zind]]
        inj_v = source_mask[x, y, zind] * save_src_v[time, source_id[x, y,
                                                                     zind]]

        eq_u = Inc(u.forward[t + 1, x, y, zind],
                   inj_u,
                   implicit_dims=(time, x, y, sp_zi))
        eq_v = Inc(v.forward[t + 1, x, y, zind],
                   inj_v,
                   implicit_dims=(time, x, y, sp_zi))

        # The additional time-tiling equations
        # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)

        performance_map = np.array([[0, 0, 0, 0, 0]])

        bxstart = 4
        bxend = 17
        bystart = 4
        byend = 17
        bstep = 16

        txstart = 8
        txend = 9
        tystart = 8
        tyend = 9

        tstep = 16
        # Temporal autotuning
        for tx in range(txstart, txend, tstep):
            # import pdb; pdb.set_trace()
            for ty in range(tystart, tyend, tstep):
                for bx in range(bxstart, bxend, bstep):
                    for by in range(bystart, byend, bstep):

                        block_sizes.data[:] = [tx, ty, bx, by]

                        eqxb = Eq(xb_size, block_sizes[0])
                        eqyb = Eq(yb_size, block_sizes[1])
                        eqxb2 = Eq(x0_blk0_size, block_sizes[2])
                        eqyb2 = Eq(y0_blk0_size, block_sizes[3])

                        u.data[:] = 0
                        v.data[:] = 0
                        print("-----")
                        tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u,
                                 eq_v)

                        op_tt = self.op_fwd(kernel, save, tteqs)
                        summary_tt = op_tt.apply(u=u,
                                                 v=v,
                                                 dt=kwargs.pop('dt', self.dt),
                                                 **kwargs)
                        norm_tt_u = norm(u)
                        norm_tt_v = norm(v)
                        print("Norm u:", regnormu)
                        print("Norm v:", regnormv)
                        print("Norm(tt_u):", norm_tt_u)
                        print("Norm(tt_v):", norm_tt_v)

                        print(
                            "===Temporal blocking======================================"
                        )

                        performance_map = np.append(performance_map, [[
                            tx, ty, bx, by,
                            summary_tt.globals['fdlike'].gflopss
                        ]], 0)

                print(performance_map)
                # tids = np.unique(performance_map[:, 0])

                #for tid in tids:
                bids = np.where((performance_map[:, 0] == tx)
                                & (performance_map[:, 1] == ty))
                bx_data = np.unique(performance_map[bids, 2])
                by_data = np.unique(performance_map[bids, 3])
                gptss_data = performance_map[bids, 4]
                gptss_data = gptss_data.reshape(len(bx_data), len(by_data))

                fig, ax = plt.subplots()
                im = ax.imshow(gptss_data)
                pause(2)

                # We want to show all ticks...
                ax.set_xticks(np.arange(len(bx_data)))
                ax.set_yticks(np.arange(len(by_data)))
                # ... and label them with the respective list entries
                ax.set_xticklabels(bx_data)
                ax.set_yticklabels(by_data)

                ax.set_title(
                    "Gpts/s for fixed tile size. (Sweeping block sizes)")
                fig.tight_layout()

                fig.colorbar(im, ax=ax)
                # ax = sns.heatmap(gptss_data, linewidth=0.5)
                plt.savefig(
                    str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) +
                    ".pdf")

        if 0:
            cmap = plt.cm.get_cmap("viridis")
            values = u.data[0, :, :, :]
            vistagrid = pv.UniformGrid()
            vistagrid.dimensions = np.array(values.shape) + 1
            vistagrid.spacing = (1, 1, 1)
            vistagrid.origin = (0, 0, 0)  # The bottom left corner of the data set
            vistagrid.cell_arrays["values"] = values.flatten(order="F")
            vistaslices = vistagrid.slice_orthogonal()
            vistagrid.plot(show_edges=True)
            vistaslices.plot(cmap=cmap)

        return rec, u, v, summary
Example #22
ste = 0.9
stepx = (ste - stx) / int(np.sqrt(src.npoint))

src.coordinates.data[:, :2] = np.array(
    np.meshgrid(np.arange(stx, ste, stepx), np.arange(
        stx, ste, stepx))).T.reshape(-1, 2) * np.array(model.domain_size[:1])

src.coordinates.data[:, -1] = 20  # Depth is 20m

# f : perform source injection on an empty grid
f = TimeFunction(name="f", grid=model.grid, space_order=so, time_order=2)
src_f = src.inject(field=f.forward, expr=src * dt**2 / model.m)
# op_f = Operator([src_f], opt=('advanced', {'openmp': True}))
op_f = Operator([src_f])
op_f.apply(time=time_range.num - 1)
normf = norm(f)
print("==========")
print(normf)
print("===========")

# uref : reference solution
# uref = TimeFunction(name="uref", grid=model.grid, space_order=so, time_order=2)
# src_term_ref = src.inject(field=uref.forward, expr=src * dt**2 / model.m)
# pde_ref = model.m * uref.dt2 - uref.laplace + model.damp * uref.dt
# stencil_ref = Eq(uref.forward, solve(pde_ref, uref.forward))

# Get the nonzero indices
nzinds = np.nonzero(f.data[0])  # nzinds is a tuple
assert len(nzinds) == len(shape)

shape = model.grid.shape
Example #23
def test_viscoacoustic(dtype):
    _, _, _, [rec] = run(dtype=dtype)
    assert np.isclose(norm(rec), 18.7749, atol=1e-3, rtol=0)
Example #24
def test_viscoelastic():
    _, _, _, [rec1, rec2, v, tau] = run()
    assert np.isclose(norm(rec1), 12.28040, atol=1e-3, rtol=0)
    assert np.isclose(norm(rec2), 0.312461, atol=1e-3, rtol=0)
Example #25
def test_isoacoustic_stability(ndim):
    shape = tuple([11]*ndim)
    spacing = tuple([20]*ndim)
    _, _, _, [rec, _] = run(shape=shape, spacing=spacing, tn=20000.0, nbl=0)
    assert np.isfinite(norm(rec))
Example #26
def J_adjoint_checkpointing(model,
                            src_coords,
                            wavelet,
                            rec_coords,
                            recin,
                            space_order=8,
                            is_residual=False,
                            n_checkpoints=None,
                            maxmem=None,
                            return_obj=False,
                            isic=False,
                            ws=None,
                            t_sub=1):
    """
    Jacobian (adjoint of Born modeling operator) operator on a shot record
    as a source (i.e. data residual). Outputs the gradient with checkpointing.

    Parameters
    ----------
    model: Model
        Physical model
    src_coords: Array
        Coordinates of the source(s)
    wavelet: Array
        Source signature
    rec_coords: Array
        Coordinates of the receiver(s)
    recin: Array
        Receiver data
    space_order: Int (optional)
        Spatial discretization order, defaults to 8
    checkpointing: Bool
        Whether or not to use checkpointing
    n_checkpoints: Int
        Number of checkpoints for checkpointing
    maxmem: Float
        Maximum memory to use for checkpointing
    isic : Bool
        Whether or not to use ISIC imaging condition
    ws : Array
        Extended source spatial distribution
    is_residual: Bool
        Whether to treat the input as the residual or as the observed data

    Returns
    -------
     Array
        Adjoint jacobian on the input data (gradient)
    """
    # Optimal checkpointing
    op_f, u, rec_g = forward(model,
                             src_coords,
                             rec_coords,
                             wavelet,
                             space_order=space_order,
                             return_op=True,
                             ws=ws)
    op, g, v = gradient(model,
                        recin,
                        rec_coords,
                        u,
                        space_order=space_order,
                        return_op=True,
                        isic=isic)

    nt = wavelet.shape[0]
    rec = Receiver(name='rec',
                   grid=model.grid,
                   ntime=nt,
                   coordinates=rec_coords)
    cp = DevitoCheckpoint([uu for uu in as_tuple(u)])
    if maxmem is not None:
        memsize = (cp.size * u.data.itemsize)
        n_checkpoints = int(np.floor(maxmem * 10**6 / memsize))
    # Op arguments
    uk = {uu.name: uu for uu in as_tuple(u)}
    vk = {**uk, **{vv.name: vv for vv in as_tuple(v)}}
    uk.update({'rcv%s' % as_tuple(u)[0].name: rec_g})
    vk.update({'src%s' % as_tuple(v)[0].name: rec})
    # Wrapped ops
    wrap_fw = CheckpointOperator(op_f, vp=model.vp, **uk)
    wrap_rev = CheckpointOperator(op, vp=model.vp, **vk)

    # Run forward
    wrp = Revolver(cp, wrap_fw, wrap_rev, n_checkpoints, nt - 2)
    wrp.apply_forward()

    # Residual and gradient
    if is_residual is True:  # input data is already the residual
        rec.data[:] = recin[:]
    else:
        rec.data[:] = rec.data[:] - recin[:]  # input is observed data

    wrp.apply_reverse()

    if return_obj:
        return .5 * model.critical_dt * norm(rec)**2, g.data
    return g.data
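
The maxmem-to-n_checkpoints conversion above is a straightforward memory budget; a worked sketch with hypothetical figures:

import numpy as np

checkpoint_size = 4_100_000                # values stored per checkpoint (cp.size)
itemsize = np.dtype(np.float32).itemsize   # 4 bytes per value
maxmem = 500                               # memory budget in MB
memsize = checkpoint_size * itemsize       # bytes per checkpoint
n_checkpoints = int(np.floor(maxmem * 10**6 / memsize))
# n_checkpoints == 30, i.e. 30 checkpoints fit within the 500 MB budget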
Example #27
def test_viscoacoustic(kernel, time_order, normrec, atol):
    _, _, _, [rec, _, _] = run(kernel=kernel, time_order=time_order)
    assert np.isclose(norm(rec), normrec, atol=atol, rtol=0)
Example #28
              dm=dm, rho=rho, dt=dt_full)

# Time axis
t0 = 0.
tn = 300.
dt = model.critical_dt
time = TimeAxis(start=t0, step=dt_full, stop=tn)
print(time.num)
src = RickerSource(name='src', grid=model.grid, f0=0.020, time_range=time, npoint=1)
src.coordinates.data[:, 0] = 5000.
src.coordinates.data[:, 1] = 350.

nrec = 501
rec_coords = np.empty((nrec, 2))
rec_coords[:, 0] = np.linspace(0., 10000., nrec)
rec_coords[:, 1] = 6.
####### RUN  #########
# Forward
op = TTIPropagators(model, space_order=so)

rec, u, v = op.forward(src, rec_coords, save=True)
grad = op.gradient(rec, u, v, isic=True)

linD, u, v, summary = op.born(src, rec_coords, sub=(4, 1), autotune=('aggressive', 'runtime'))
grad2 = op.gradient(linD, u, v, sub=(4, 1), isic=True, autotune=('basic', 'runtime'))

print(norm(grad))
print(norm(grad2))
print(norm(rec))
print(norm(linD))
Example #29
def test_isoacoustic(fs, normrec, dtype):
    _, _, _, [rec, _] = run(fs=fs, dtype=dtype)
    assert np.isclose(norm(rec), normrec, rtol=1e-3, atol=0)
Example #30
def test_elastic(dtype):
    _, _, _, [rec1, rec2, v, tau] = run(dtype=dtype)
    assert np.isclose(norm(rec1), 19.25636, atol=1e-3, rtol=0)
    assert np.isclose(norm(rec2), 0.627606, atol=1e-3, rtol=0)