Example #1
File: run.py Project: keshava/daks
def initial_setup(filename, tn, dtype, space_order, nbl, datakey="m0", exclude_boundaries=True, water_depth=20):
    model = overthrust_model_iso(filename, datakey=datakey, dtype=dtype, space_order=space_order, nbl=nbl)

    geometry = create_geometry(model, tn)
    nbl = model.nbl

    if exclude_boundaries:
        v = trim_boundary(model.vp, model.nbl)
    else:
        v = model.vp.data

    # Define physical constraints on velocity - we know the maximum and minimum velocities we are expecting
    vmax = np.ones(v.shape) * 6.5
    vmin = np.ones(v.shape) * 1.3

    # Constrain the velocity for the water region. We know the velocity of water beforehand.
    if exclude_boundaries:
        vmax[:, 0:water_depth] = v[:, 0:water_depth]
        vmin[:, 0:water_depth] = v[:, 0:water_depth]
    else:
        vmax[:, 0:water_depth+nbl] = v[:, 0:water_depth+nbl]
        vmin[:, 0:water_depth+nbl] = v[:, 0:water_depth+nbl]

    b = Bounds(mat2vec(vmin), mat2vec(vmax))

    return model, geometry, b
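A hedged aside: assuming Bounds here is scipy.optimize.Bounds, the two flattened arrays act as elementwise box constraints that L-BFGS-B enforces. A minimal standalone sketch (not project code):

import numpy as np
from scipy.optimize import Bounds, minimize

vmin = np.full(4, 1.3)   # per-cell lower velocity limit
vmax = np.full(4, 6.5)   # per-cell upper velocity limit
b = Bounds(vmin, vmax)   # elementwise bounds, as returned by initial_setup

# The optimiser clips each component to [1.3, 6.5]:
res = minimize(lambda x: np.sum((x - 10.0)**2), np.full(4, 2.0),
               method='L-BFGS-B', bounds=b)
print(res.x)             # approximately [6.5, 6.5, 6.5, 6.5]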
Example #2
def verify_equivalence():

    model, geometry, _ = initial_setup()
    result1 = fwi_gradient_checkpointed(mat2vec(model.vp.data), model,
                                        geometry)

    result2 = fwi_gradient(mat2vec(model.vp.data), model, geometry)

    for r1, r2 in zip(result1, result2):
        np.testing.assert_allclose(r2, r1, rtol=0.01, atol=1e-8)
Example #3
def basic_gradient_test(wave, space_order, v0, v, rec, F0, gradient, dm):

    G = np.dot(mat2vec(gradient.data), dm.reshape(-1))

    # FWI Gradient test
    H = [0.5, 0.25, .125, 0.0625, 0.0312, 0.015625, 0.0078125]
    error1 = np.zeros(7)
    error2 = np.zeros(7)
    for i in range(0, 7):
        # Add the perturbation to the model
        def initializer(data):
            data[:] = np.sqrt(v0**2 * v**2 /
                              ((1 - H[i]) * v**2 + H[i] * v0**2))

        vloc = Function(name='vloc',
                        grid=wave.model.grid,
                        space_order=space_order,
                        initializer=initializer)
        # Data for the new model
        d = wave.forward(vp=vloc, dt=wave.model.critical_dt)[0]
        # First order error Phi(m0+dm) - Phi(m0)
        F_i = .5 * linalg.norm((d.data - rec.data).reshape(-1))**2
        error1[i] = np.absolute(F_i - F0)
        # Second-order error: Phi(m0+dm) - Phi(m0) - <J(m0)^T \delta d, dm>
        error2[i] = np.absolute(F_i - F0 - H[i] * G)

    # Check the slopes of the two error curves
    p1 = np.polyfit(np.log10(H), np.log10(error1), 1)
    p2 = np.polyfit(np.log10(H), np.log10(error2), 1)
    info('1st order error, Phi(m0+dm)-Phi(m0): %s' % (p1))
    info(r'2nd order error, Phi(m0+dm)-Phi(m0) - <J(m0)^T \delta d, dm>: %s' %
         (p2))
    assert np.isclose(p1[0], 1.0, rtol=0.1)
    assert np.isclose(p2[0], 2.0, rtol=0.1)
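Why the two assertions identify the convergence orders: errors that scale like h and h**2 have log-log slopes of about 1 and 2, which np.polyfit recovers. A synthetic standalone check (not project code):

import numpy as np

H = np.array([0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625, 0.0078125])
error1 = 3.0 * H       # behaves like a first-order error term
error2 = 7.0 * H**2    # behaves like a second-order error term

p1 = np.polyfit(np.log10(H), np.log10(error1), 1)
p2 = np.polyfit(np.log10(H), np.log10(error2), 1)
assert np.isclose(p1[0], 1.0)   # slope of log-error vs log-h is ~1
assert np.isclose(p2[0], 2.0)   # slope is ~2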
Example #4
def initial_setup(filename, tn, dtype, space_order, nbl, datakey="m0"):
    model = overthrust_model_iso(filename,
                                 datakey=datakey,
                                 dtype=dtype,
                                 space_order=space_order,
                                 nbl=nbl)

    geometry = create_geometry(model, tn)

    clipped_model = clip_boundary_and_numpy(model.vp, model.nbl)
    vmax = np.ones(clipped_model.shape) * 6.5
    vmin = np.ones(clipped_model.shape) * 1.3

    vmax[:, 0:20] = clipped_model[:, 0:20]
    vmin[:, 0:20] = clipped_model[:, 0:20]
    b = Bounds(mat2vec(vmin), mat2vec(vmax))

    return model, geometry, b
Example #5
def test_vec2mat():
    shape = (2, 2, 2)
    vec = np.arange(8)
    mat = vec.reshape(shape)

    assert np.array_equal(mat, vec2mat(vec, shape))

    back = mat2vec(mat)

    assert np.array_equal(back, vec)
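mat2vec and vec2mat themselves are not shown on this page; judging from this round-trip test they are thin wrappers over NumPy reshaping. A minimal sketch under that assumption (note that Examples #7 and #15 use an unrelated mat2vec from a visual-odometry project, which converts a pose matrix to a vector):

import numpy as np

def mat2vec(mat):
    # flatten an n-D array into a 1-D vector in C order
    return np.asarray(mat).reshape(-1)

def vec2mat(vec, shape):
    # inverse of mat2vec once the target shape is known
    return np.asarray(vec).reshape(shape)

mat = np.arange(8).reshape((2, 2, 2))
assert np.array_equal(vec2mat(mat2vec(mat), (2, 2, 2)), mat)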
Example #6
def fwi_gradient(vp_in, model, geometry, nshots, client, solver_params):
    start_time = time.time()
    vp_in = vec2mat(vp_in, model.shape)
    f_vp_in = client.scatter(vp_in)  # Dask enforces this for large arrays
    assert model.shape == vp_in.shape

    futures = []

    for i in range(nshots):
        futures.append(
            client.submit(
                fwi_gradient_shot,
                f_vp_in,
                i,
                solver_params,
                resources={
                    'tasks': 1
                }))  # Ensure one task per worker (to run two, tasks=0.5)

    shape = model.shape

    def reduction(*args):
        grad = np.zeros(shape)  # Closured from above
        objective = 0.

        for a in args:
            o, g = a
            objective += o
            grad += g
        return objective, grad

    reduce_future = client.submit(reduction, *futures)
    wait(reduce_future)

    objective, grad = reduce_future.result()
    elapsed_time = time.time() - start_time
    print("Objective function evaluation completed in %f seconds" %
          elapsed_time)

    # Scipy LBFGS misbehaves if type is not float64
    grad = mat2vec(np.array(grad)).astype(np.float64)

    return objective, grad
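The scatter/submit/reduce shape above is standard dask.distributed usage: large read-only inputs are scattered to the cluster once, per-shot gradients run as independent futures, and a single reduction future sums the partial results. A self-contained sketch (illustrative only, not project code):

import numpy as np
from dask.distributed import Client, wait

def work(data, i):
    # stands in for fwi_gradient_shot: returns (objective, gradient)
    return float(i), np.full_like(data, i)

client = Client(processes=False)           # small local cluster for the demo
f_data = client.scatter(np.zeros((4, 4)))  # ship the large array once
futures = [client.submit(work, f_data, i) for i in range(3)]

def reduction(*parts):
    objective, grad = 0.0, np.zeros((4, 4))
    for o, g in parts:
        objective += o
        grad += g
    return objective, grad

reduce_future = client.submit(reduction, *futures)
wait(reduce_future)
print(reduce_future.result())              # (3.0, summed 4x4 gradient)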
Example #7
def pose_generator(base_dir, batch_size=64):
    image_list = []
    stamp_list = []
    pose_list = []

    for b in base_dir:
        with open(b + 'associated_poses.txt') as img_list:
            for line in img_list:
                line = line.rstrip('\n')
                timestamp, rgb, _, tx, ty, tz, r1, r2, r3, r4 = line.split(' ')
                image_list.append(b + rgb)
                stamp_list.append(float(timestamp))
                pose_list.append(pose_read([tx, ty, tz, r1, r2, r3, r4]))
    indexes = list(range(len(image_list)))
    while True:
        indices = np.random.choice(a=indexes, size=batch_size)
        batch_input0 = []
        batch_input1 = []
        batch_gt = []

        for i in indices:
            skip = 1  # randint(1, 6)
            if i + skip > len(image_list) - 1:
                t0 = i - skip
                t1 = i
            elif abs(stamp_list[i] - stamp_list[i + skip]) > 10.0:
                t0 = i
                t1 = i - skip
            else:
                t0 = i
                t1 = i + skip

            img0 = img_read(image_list[t0], aug=False)
            img1 = img_read(image_list[t1], aug=False)
            batch_input0.append(img0)
            batch_input1.append(img1)
            batch_gt.append(mat2vec(ominus(pose_list[t1], pose_list[t0]), mode='e'))

        batch_input0 = np.array(batch_input0)
        batch_input1 = np.array(batch_input1)
        batch_gt = np.array(batch_gt)

        yield [batch_input0, batch_input1], batch_gt
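Hypothetical usage of the generator above (the directory path is a placeholder): because it yields forever, it can feed a Keras-style fit loop directly.

gen = pose_generator(['/data/tum_sequence/'], batch_size=8)  # hypothetical path
(batch0, batch1), gt = next(gen)   # two image batches plus relative poses
# model.fit(gen, steps_per_epoch=100, epochs=10)  # typical consumption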
Example #8
def gradient_test_errors(solver, rec, F0, gradient):
    true_model_filename = "overthrust_3D_true_model_2D.h5"
    initial_model_filename = "overthrust_3D_initial_model_2D.h5"
    dtype = solver.model.dtype
    so = solver.space_order
    nbl = solver.model.nbl

    model_t = overthrust_model_iso(true_model_filename, datakey="m",
                                   dtype=dtype, space_order=so, nbl=nbl)
    model0 = overthrust_model_iso(initial_model_filename, datakey="m0",
                                  dtype=dtype, space_order=so, nbl=nbl)
    v = model_t.vp
    v0 = model0.vp
    dm = np.float64(v.data**(-2) - v0.data**(-2))
    print("dm", np.linalg.norm(dm))
    G = np.dot(mat2vec(gradient.data), dm.reshape(-1))
    print("G", G)
    # FWI Gradient test
    H = [0.5, 0.25, .125, 0.0625, 0.0312, 0.015625, 0.0078125]
    error1 = np.zeros(7)
    error2 = np.zeros(7)
    for i in range(0, 7):
        # Add the perturbation to the model
        def initializer(data):
            data[:] = np.sqrt(v0.data**2 * v.data**2 /
                              ((1 - H[i]) * v.data**2 + H[i] * v0.data**2))
        vloc = Function(name='vloc', grid=solver.model.grid, space_order=so,
                        initializer=initializer)

        # Data for the new model
        d = solver.forward(vp=vloc, dt=solver.model.critical_dt)[0]
        # First order error Phi(m0+dm) - Phi(m0)
        F_i = .5*np.linalg.norm((d.data - rec.data).reshape(-1))**2
        print("F%d" % i, F_i)
        error1[i] = np.absolute(F_i - F0)
        # Second-order error: Phi(m0+dm) - Phi(m0) - <J(m0)^T \delta d, dm>
        error2[i] = np.absolute(F_i - F0 - H[i] * G)

    return error1, error2, H
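Unlike basic_gradient_test in Example #3, this variant returns the raw errors and leaves the slope fit to the caller; a typical completion looks like this (sketch, assuming solver, rec, F0 and gradient were produced by the surrounding test harness):

import numpy as np

error1, error2, H = gradient_test_errors(solver, rec, F0, gradient)
p1 = np.polyfit(np.log10(H), np.log10(error1), 1)   # expect slope ~ 1
p2 = np.polyfit(np.log10(H), np.log10(error2), 1)   # expect slope ~ 2
assert np.isclose(p1[0], 1.0, rtol=0.1)
assert np.isclose(p2[0], 2.0, rtol=0.1)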
Example #9
def test_equivalence_local_remote_single_shot(shots_container):
    initial_model_filename, tn, dtype, so, nbl = "overthrust_3D_initial_model_2D.h5", 4000, np.float32, 6, 40
    model, _, bounds = initial_setup(filename=Blob("models",
                                                   initial_model_filename),
                                     tn=tn,
                                     dtype=dtype,
                                     space_order=so,
                                     nbl=nbl)

    solver_params = {
        'h5_file': Blob("models", initial_model_filename),
        'tn': tn,
        'space_order': so,
        'dtype': dtype,
        'datakey': 'm0',
        'nbl': nbl
    }

    solver = overthrust_solver_iso(**solver_params)

    v0 = mat2vec(model.vp.data).astype(np.float64)

    local_results = fwi_gradient_local(v0, 1, solver, shots_container)

    client = setup_dask()

    remote_results = fwi_gradient(v0,
                                  1,
                                  client,
                                  solver,
                                  shots_container,
                                  exclude_boundaries=False,
                                  scale_gradient=False,
                                  mute_water=False)

    np.testing.assert_approx_equal(local_results[0], remote_results[0])

    np.testing.assert_array_almost_equal(local_results[1], remote_results[1])
Example #10
def fwi_gradient_local(vp_in, nshots, solver, shots_container):
    model = solver.model

    vp_in = np.array(vec2mat(vp_in, solver.model.vp.shape),
                     dtype=solver.model.dtype)

    assert model.vp.shape == vp_in.shape

    solver.model.update("vp", vp_in)

    objective = 0.

    grad = np.zeros(model.vp.shape)

    for i in range(nshots):
        o, g = process_shot(i,
                            solver,
                            shots_container,
                            exclude_boundaries=False)
        objective += o
        grad += g

    return objective, -mat2vec(grad).astype(np.float64)
Example #11
File: run.py Project: keshava/daks
def run(initial_model_filename, results_dir, tn, nshots, shots_container, so, nbl, kernel, scale_gradient, max_iter,
        checkpointing, n_checkpoints, compression, tolerance, reference_solution, dtype):

    if dtype == 'float32':
        dtype = np.float32
    elif dtype == 'float64':
        dtype = np.float64
    else:
        raise ValueError("Invalid dtype")

    water_depth = 20  # Number of points at the top of the domain that correspond to water
    exclude_boundaries = True  # Exclude the boundary regions from the optimisation problem
    mute_water = True  # Mute the gradient in the water region

    initial_model_filename, datakey = initial_model_filename

    model, geometry, bounds = initial_setup(initial_model_filename, tn, dtype, so, nbl,
                                            datakey=datakey, exclude_boundaries=exclude_boundaries, water_depth=water_depth)

    client = setup_dask()

    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    intermediates_dir = os.path.join(results_dir, "intermediates")

    if not os.path.exists(intermediates_dir):
        os.mkdir(intermediates_dir)

    progress_dir = os.path.join(results_dir, "progress")

    if not os.path.exists(progress_dir):
        os.mkdir(progress_dir)

    auth = default_auth()

    solver_params = {'h5_file': Blob("models", initial_model_filename, auth=auth), 'tn': tn,
                     'space_order': so, 'dtype': dtype, 'datakey': datakey, 'nbl': nbl,
                     'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}

    if kernel in ['OT2', 'OT4']:
        solver_params['kernel'] = kernel
        solver = overthrust_solver_iso(**solver_params)
    elif kernel == "rho":
        solver_params['water_depth'] = water_depth
        solver_params['calculate_density'] = False
        solver = overthrust_solver_density(**solver_params)
    solver._dt = 1.75
    solver.geometry.resample(1.75)

    f_args = [nshots, client, solver, shots_container, auth, scale_gradient, mute_water, exclude_boundaries, water_depth]

    if checkpointing:
        f_args += [checkpointing, {'n_checkpoints': n_checkpoints, 'scheme': compression,
                                   'tolerance': tolerance}]
    if exclude_boundaries:
        v0 = mat2vec(trim_boundary(model.vp, model.nbl)).astype(np.float64)
    else:
        v0 = mat2vec(model.vp).astype(np.float64)

    def callback(progress_dir, intermediates_dir, model, exclude_boundaries, vec):
        global plot_model_to_file
        callback.call_count += 1

        if not hasattr(callback, "obj_fn_history"):
            callback.obj_fn_history = []

        callback.obj_fn_history.append(fwi_gradient.obj_fn_cache[vec.tobytes()])

        fwi_iteration = callback.call_count
        filename = os.path.join(intermediates_dir, "solution%d.h5" % fwi_iteration)
        if exclude_boundaries:
            to_hdf5(vec2mat(vec, model.shape), filename)
        else:
            to_hdf5(vec2mat(vec, model.vp.shape), filename)

        progress_filename = os.path.join(progress_dir, "fwi-iter%d.pdf" % (fwi_iteration))
        plot_model_to_file(solver.model, progress_filename)

    callback.call_count = 0

    partial_callback = partial(callback, progress_dir, intermediates_dir, model, exclude_boundaries)

    fwi_gradient.call_count = 0
    fwd_op = solver.op_fwd(save=False)
    rev_op = solver.op_grad(save=False)
    fwd_op.ccode
    rev_op.ccode

    solution_object = minimize(fwi_gradient,
                               v0,
                               args=tuple(f_args),
                               jac=True, method='L-BFGS-B',
                               callback=partial_callback, bounds=bounds,
                               options={'disp': True, 'maxiter': max_iter})

    if exclude_boundaries:
        final_model = vec2mat(solution_object.x, model.shape)
    else:
        final_model = vec2mat(solution_object.x, model.vp.shape)

    solver.model.update("vp", final_model)

    # Save plot of final model
    final_plot_filename = os.path.join(results_dir, "final_model.pdf")
    plot_model_to_file(solver.model, final_plot_filename)

    # Save objective function values to CSV
    obj_fn_history = callback.obj_fn_history
    obj_fn_vals_filename = os.path.join(results_dir, "objective_function_values.csv")
    with open(obj_fn_vals_filename, "w") as vals_file:
        vals_writer = csv.writer(vals_file)
        for r in obj_fn_history:
            vals_writer.writerow([r])

    # Plot objective function values
    plt.title("FWI convergence")
    plt.xlabel("Iteration number")
    plt.ylabel("Objective function value")
    obj_fun_plt_filename = os.path.join(results_dir, "convergence.tex")
    obj_fun_plt_pdf_filename = os.path.join(results_dir, "convergence.pdf")

    plt.clf()
    if reference_solution is None:
        plt.plot(obj_fn_history)
    else:
        # Load reference solution convergence history
        with open(reference_solution, 'r') as reference_file:
            vals_reader = csv.reader(reference_file)
            reference_solution_values = []
            for r in vals_reader:
                reference_solution_values.append(float(r[0]))
        # Pad with 0s to ensure same size

        # Plot with legends
        plt.plot(obj_fn_history, label="Lossy FWI")
        plt.plot(reference_solution_values, label="Reference FWI")
        # Display legend
        plt.legend()
    plt.savefig(obj_fun_plt_pdf_filename)
    tikzplotlib.save(obj_fun_plt_filename)

    true_model = overthrust_model_iso("overthrust_3D_true_model_2D.h5", datakey="m", dtype=dtype, space_order=so, nbl=nbl)
    true_model_vp = trim_boundary(true_model.vp, true_model.nbl)

    error_norm = np.linalg.norm(true_model_vp - final_model)
    print("L2 norm of final solution vs true solution: %f" % error_norm)

    data = {'error_norm': error_norm,
            'checkpointing': checkpointing,
            'compression': compression,
            'tolerance': tolerance,
            'ncp': n_checkpoints}

    write_results(data, "fwi_experiment.csv")

    # Final solution
    final_solution_filename = os.path.join(results_dir, "final_solution.h5")
    to_hdf5(final_model, final_solution_filename)
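The run above hangs mutable state off function objects (callback.call_count, fwi_gradient.obj_fn_cache) because scipy's callback receives only the current iterate. A condensed standalone sketch of that pattern (not project code):

import numpy as np
from functools import partial
from scipy.optimize import minimize

def objective(vec):
    val = float(np.sum(vec**2))
    objective.obj_fn_cache[vec.tobytes()] = val  # keyed by the iterate's bytes
    return val, 2.0 * vec                        # (value, gradient) since jac=True

objective.obj_fn_cache = {}

def callback(tag, vec):
    callback.call_count += 1
    # the cache lets the callback recover the objective value at this iterate
    print(tag, callback.call_count, objective.obj_fn_cache[vec.tobytes()])

callback.call_count = 0
minimize(objective, np.ones(3), jac=True, method='L-BFGS-B',
         callback=partial(callback, 'iter'))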
Example #12
File: run.py Project: keshava/daks
def fwi_gradient(vp_in, nshots, client, solver, shots_container, auth, scale_gradient=None, mute_water=True,
                 exclude_boundaries=True, water_depth=20, checkpointing=False, checkpoint_params=None):
    start_time = time.time()

    reset_cluster(client)

    if not hasattr(fwi_gradient, "obj_fn_cache"):
        fwi_gradient.obj_fn_cache = {}

    if exclude_boundaries:
        vp = np.array(vec2mat(vp_in, solver.model.shape), dtype=solver.model.dtype)
    else:
        vp = np.array(vec2mat(vp_in, solver.model.vp.shape), dtype=solver.model.dtype)

    solver.model.update("vp", vp)

    # Dask enforces this for large objects
    f_solver = client.scatter(solver, broadcast=True)

    futures = []

    for i in range(nshots):
        if checkpointing:
            futures.append(client.submit(process_shot_checkpointed, i, f_solver, shots_container, auth, exclude_boundaries,
                                         checkpoint_params, resources={'tasks': 1}))
        else:
            futures.append(client.submit(process_shot, i, f_solver, shots_container, auth, exclude_boundaries,
                                         resources={'tasks': 1}))  # Ensure one task per worker (to run two, tasks=0.5)

    if exclude_boundaries:
        gradient_shape = solver.model.shape
    else:
        gradient_shape = solver.model.vp.shape

    def reduction(*args):
        grad = np.zeros(gradient_shape)  # Closured from above
        objective = 0.

        for a in args:
            o, g = a
            objective += o
            grad += g
        return objective, grad

    reduce_future = client.submit(reduction, *futures)

    wait(reduce_future)

    objective, grad = reduce_future.result()

    if mute_water:
        if exclude_boundaries:
            muted_depth = water_depth
        else:
            muted_depth = water_depth + solver.model.nbl
        grad[:, 0:muted_depth] = 0

    # Scipy LBFGS misbehaves if type is not float64
    grad = mat2vec(grad).astype(np.float64)

    if scale_gradient is not None:
        if scale_gradient == "W":
            if not hasattr(fwi_gradient, "gradient_scaling_factor"):
                fwi_gradient.gradient_scaling_factor = np.max(np.abs(grad))

            grad /= fwi_gradient.gradient_scaling_factor
        elif scale_gradient == "L":
            grad /= np.max(np.abs(grad))
        else:
            raise ValueError("Invalid value %s for gradient scaling. Allowed: None, L, W" % scale_gradient)

    fwi_gradient.obj_fn_cache[vp_in.tobytes()] = objective

    elapsed_time = time.time() - start_time
    eprint("Objective function evaluation completed in %f seconds. F=%f" % (elapsed_time, objective))

    return objective, -grad
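The two scale_gradient modes above differ only in when the normalisation factor is captured: "W" freezes it at the first evaluation for the whole run, while "L" renormalises on every call. A minimal standalone sketch (hypothetical helper name, not project code):

import numpy as np

def scale_gradient_vec(grad, mode):
    if mode == 'W':
        # capture the factor once, on the first call, and reuse it thereafter
        if not hasattr(scale_gradient_vec, 'factor'):
            scale_gradient_vec.factor = np.max(np.abs(grad))
        return grad / scale_gradient_vec.factor
    elif mode == 'L':
        # renormalise by the current maximum on every call
        return grad / np.max(np.abs(grad))
    raise ValueError('Invalid value %s for gradient scaling. Allowed: None, L, W' % mode)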
Example #13
def test_equivalence_checkpointing(shots_container, exclude_boundaries,
                                   scale_gradient, mute_water):
    initial_model_filename = "overthrust_3D_initial_model_2D.h5"
    tn = 4000
    dtype = np.float32
    so = 6
    nbl = 40
    water_depth = 20
    nshots = 1
    client = setup_dask()

    solver_params = {
        'h5_file': Blob("models", initial_model_filename),
        'tn': tn,
        'space_order': so,
        'dtype': dtype,
        'datakey': 'm0',
        'nbl': nbl
    }

    solver = overthrust_solver_iso(**solver_params)

    model, geometry, _ = initial_setup(initial_model_filename,
                                       tn,
                                       dtype,
                                       so,
                                       nbl,
                                       datakey="m0",
                                       exclude_boundaries=exclude_boundaries,
                                       water_depth=water_depth)

    if exclude_boundaries:
        v0 = mat2vec(np.array(trim_boundary(model.vp,
                                            model.nbl))).astype(np.float64)
    else:
        v0 = mat2vec(model.vp.data).astype(np.float64)

    o1, grad1 = fwi_gradient(v0,
                             nshots,
                             client,
                             solver,
                             shots_container,
                             scale_gradient,
                             mute_water,
                             exclude_boundaries,
                             water_depth,
                             checkpointing=True)

    o2, grad2 = fwi_gradient(v0, nshots, client, solver, shots_container,
                             scale_gradient, mute_water, exclude_boundaries,
                             water_depth)
    print(o1, np.linalg.norm(grad1), grad1.shape)
    print(o2, np.linalg.norm(grad2), grad2.shape)

    # grad1/grad2 are flattened vectors here, so sample leading entries for debugging
    grad1_head = [grad1[k] for k in range(40)]
    grad2_head = [grad2[k] for k in range(40)]

    print(grad1_head)
    print(grad2_head)

    np.testing.assert_approx_equal(o1, o2, significant=5)

    # assert_array_almost_equal takes `decimal`, not `significant`
    np.testing.assert_array_almost_equal(grad1, grad2, decimal=5)
Example #14
def run(initial_model_filename, final_solution_basename, tn, nshots,
        shots_container, so, nbl, kernel, checkpointing, n_checkpoints,
        compression, tolerance):
    dtype = np.float32
    model, geometry, bounds = initial_setup(initial_model_filename, tn, dtype,
                                            so, nbl)

    solver_params = {
        'filename': initial_model_filename,
        'tn': tn,
        'space_order': so,
        'dtype': dtype,
        'datakey': 'm0',
        'nbl': nbl,
        'origin': model.origin,
        'spacing': model.spacing,
        'shots_container': shots_container
    }

    client = setup_dask()

    f_args = [model, geometry, nshots, client, solver_params]

    # if checkpointing:
    #    f_g = fwi_gradient_checkpointed
    #    compression_params = {'scheme': compression, 'tolerance': 10**(-tolerance)}
    #    f_args.append(n_checkpoints)
    #    f_args.append(compression_params)
    # else:

    f_g = fwi_gradient

    clipped_vp = mat2vec(clip_boundary_and_numpy(model.vp.data, model.nbl))

    def callback(final_solution_basename, vec):
        callback.call_count += 1
        fwi_iteration = callback.call_count
        filename = "%s_%d.h5" % (final_solution_basename, fwi_iteration)
        with profiler.get_timer('io', 'write_progress'):
            to_hdf5(vec2mat(vec, model.shape), filename)
        print(profiler.summary())

    callback.call_count = 0

    partial_callback = partial(callback, final_solution_basename)

    solution_object = minimize(f_g,
                               clipped_vp,
                               args=tuple(f_args),
                               jac=True,
                               method='L-BFGS-B',
                               callback=partial_callback,
                               bounds=bounds,
                               options={
                                   'disp': True,
                                   'maxiter': 60
                               })

    final_model = vec2mat(solution_object.x, model.shape)

    true_model = overthrust_model_iso("overthrust_3D_true_model_2D.h5",
                                      datakey="m",
                                      dtype=dtype,
                                      space_order=so,
                                      nbl=nbl)
    true_model_vp = clip_boundary_and_numpy(true_model.vp.data, true_model.nbl)

    error_norm = np.linalg.norm(true_model_vp - final_model)
    print(error_norm)

    data = {
        'error_norm': error_norm,
        'checkpointing': checkpointing,
        'compression': compression,
        'tolerance': tolerance,
        'ncp': n_checkpoints
    }

    write_results(data, "fwi_experiment.csv")

    to_hdf5(final_model, '%s_final.h5' % final_solution_basename)
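Both run() variants bind extra context into the scipy callback with functools.partial, since minimize invokes callback(xk) with a single argument. A minimal standalone sketch (illustrative, not project code):

import numpy as np
from functools import partial
from scipy.optimize import minimize

def callback(basename, vec):
    # basename was pre-bound; scipy supplies only the current iterate
    print(basename, np.linalg.norm(vec))

minimize(lambda x: np.sum((x - 1.0)**2), np.zeros(3),
         callback=partial(callback, 'solution'))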
Example #15
def end2end():
    v = VO(input_shape=(1, 480, 640, 3), mode='test')
    v.model_from_file(conf['end2end_for_pred'])
    # v.save_as_json(conf['end2end_for_pred'])
    v.load_weights([conf['end2end_weights']])

    #  rgb/1341847981.358690.png  -0.6527 2.6990 1.7395 0.0083 0.8604 -0.5093 -0.0175

    img0 = np.expand_dims(img_read(conf['data_path'][2] +
                                   'rgb/1341847981.322892.png'),
                          axis=0)
    img1 = np.expand_dims(img_read(conf['data_path'][2] +
                                   'rgb/1341847981.390810.png'),
                          axis=0)
    p0 = pose_read([-0.6569, 2.6976, 1.7395, 0.0074, 0.8602, -0.5096, -0.0167])
    p1 = pose_read([-0.6495, 2.7000, 1.7397, 0.0094, 0.8600, -0.5099, -0.0174])

    relative = mat2vec(ominus(p1, p0), mode='e')
    relative = np.expand_dims(relative, axis=0)

    # d,p,s = v.model.predict([img0,img1])
    d, p = v.model.predict([img0, img1])
    d = np.reshape(d, (480, 640))
    d = 1 / (d + 1e-3)

    # s=s.squeeze(axis=0)
    print(p)
    print(relative)

    d = np.reshape(d, (1, 480, 640, 1))
    syn_from_estimation = syn_model().predict([img1, d, p])
    syn_from_gt = syn_model().predict([img1, d, relative])

    visual = True
    if visual:
        plt.figure(1)

        i0 = plt.subplot(142)
        i0.set_title('I0')
        i0.imshow(img0.squeeze(axis=0))

        i1 = plt.subplot(141)
        i1.set_title('I1')
        i1.imshow(img1.squeeze(axis=0))

        isyn = plt.subplot(143)
        isyn.set_title('I0 Estimation from model')
        isyn.imshow(syn_from_estimation.squeeze(axis=0))

        igt = plt.subplot(144)
        igt.set_title('I0 Estimation from gt')
        igt.imshow(syn_from_gt.squeeze(axis=0))

        plt.show()

    img0 = img0.squeeze(axis=0)[40:440, 40:600, :]
    img1 = img1.squeeze(axis=0)[40:440, 40:600, :]
    syn_from_estimation = syn_from_estimation.squeeze(axis=0)[40:440,
                                                              40:600, :]
    syn_from_gt = syn_from_gt.squeeze(axis=0)[40:440, 40:600, :]

    # print('t0 t1 mae:',np.mean(np.abs(img0-img1)))
    print('MAE of synthesis from predicted pose:', np.mean(np.abs(img0 - syn_from_estimation)))
    print('MAE of synthesis from ground-truth pose:', np.mean(np.abs(img0 - syn_from_gt)))
Example #16
    def test_gradientFWI(self):
        r"""
        This test ensures that the FWI gradient computed with devito
        satisfies the Taylor expansion property:
        .. math::
            \Phi(m0 + h dm) = \Phi(m0) + \O(h) \\
            \Phi(m0 + h dm) = \Phi(m0) + h \nabla \Phi(m0) + \O(h^2) \\
            \Phi(m0) = .5* || F(m0 + h dm) - D ||_2^2
        where
        .. math::
            \nabla \Phi(m0) = <J^T \delta d, dm> \\
            \delta d = F(m0+ h dm) - D \\
        with F the Forward modelling operator.
        """
        initial_model_filename = "overthrust_3D_initial_model_2D.h5"
        true_model_filename = "overthrust_3D_true_model_2D.h5"

        tn = 4000

        dtype = np.float32

        so = 6

        nbl = 40

        shots_container = "shots-iso"

        shot_id = 10

        ##########

        model0 = overthrust_model_iso(initial_model_filename,
                                      datakey="m0",
                                      dtype=dtype,
                                      space_order=so,
                                      nbl=nbl)

        model_t = overthrust_model_iso(true_model_filename,
                                       datakey="m",
                                       dtype=dtype,
                                       space_order=so,
                                       nbl=nbl)

        _, geometry, _ = initial_setup(initial_model_filename,
                                       tn,
                                       dtype,
                                       so,
                                       nbl,
                                       datakey="m0")
        # rec, source_location, old_dt = load_shot(shot_id,
        #                                         container=shots_container)
        source_location = geometry.src_positions
        solver_params = {
            'h5_file': initial_model_filename,
            'tn': tn,
            'space_order': so,
            'dtype': dtype,
            'datakey': 'm0',
            'nbl': nbl,
            'origin': model0.origin,
            'spacing': model0.spacing,
            'shots_container': shots_container,
            'src_coordinates': source_location
        }

        solver = overthrust_solver_iso(**solver_params)

        true_solver_params = solver_params.copy()

        true_solver_params['h5_file'] = true_model_filename
        true_solver_params['datakey'] = "m"

        solver_true = overthrust_solver_iso(**true_solver_params)

        rec, _, _ = solver_true.forward()

        v0 = mat2vec(clip_boundary_and_numpy(model0.vp.data, model0.nbl))

        v_t = mat2vec(clip_boundary_and_numpy(model_t.vp.data, model_t.nbl))

        dm = np.float64(v_t**(-2) - v0**(-2))

        print("dm", np.linalg.norm(dm), dm.shape)

        F0, gradient = fwi_gradient_shot(vec2mat(v0, model0.shape), shot_id,
                                         solver_params, source_location)

        G = np.dot(gradient.reshape(-1), dm.reshape(-1))

        # FWI Gradient test
        H = [0.5, 0.25, .125, 0.0625, 0.0312, 0.015625, 0.0078125]
        error1 = np.zeros(7)
        error2 = np.zeros(7)
        for i in range(0, 7):
            # Add the perturbation to the model
            vloc = np.sqrt(v0**2 * v_t**2 /
                           ((1 - H[i]) * v_t**2 + H[i] * v0**2))
            m = Model(vp=vloc,
                      nbl=nbl,
                      space_order=so,
                      dtype=dtype,
                      shape=model0.shape,
                      origin=model0.origin,
                      spacing=model0.spacing,
                      bcs="damp")
            # Data for the new model
            d = solver.forward(vp=m.vp)[0]
            # First order error Phi(m0+dm) - Phi(m0)
            F_i = .5 * linalg.norm((d.data - rec.data).reshape(-1))**2
            error1[i] = np.absolute(F_i - F0)
            # Second-order error: Phi(m0+dm) - Phi(m0) - <J(m0)^T \delta d, dm>
            error2[i] = np.absolute(F_i - F0 - H[i] * G)
            print(i, F0, F_i, H[i] * G)

        # Check the slopes of the two error curves
        p1 = np.polyfit(np.log10(H), np.log10(error1), 1)
        p2 = np.polyfit(np.log10(H), np.log10(error2), 1)
        info('1st order error, Phi(m0+dm)-Phi(m0): %s' % (p1))
        info(
            r'2nd order error, Phi(m0+dm)-Phi(m0) - <J(m0)^T \delta d, dm>: %s'
            % (p2))
        print("Error 1:")
        print(error1)
        print("***")
        print("Error 2:")
        print(error2)
        assert np.isclose(p1[0], 1.0, rtol=0.1)
        assert np.isclose(p2[0], 2.0, rtol=0.1)
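A note on the perturbed-model formula used in these loops: it is linear interpolation in squared slowness m = 1/v**2, i.e. m(h) = m0 + h*(m_t - m0), which is exactly why dm is computed as v_t**-2 - v0**-2. A quick standalone check (not project code):

import numpy as np

v0, v_t, h = 1.5, 4.5, 0.25
vloc = np.sqrt(v0**2 * v_t**2 / ((1 - h) * v_t**2 + h * v0**2))
m0, m_t = v0**-2, v_t**-2
# 1/vloc**2 == (1-h)/v0**2 + h/v_t**2 == m0 + h*(m_t - m0)
assert np.isclose(vloc**-2, m0 + h * (m_t - m0))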