def run(model_filename, tn, nshots, so, nbl, shots_container, kernel, dtype):
    if dtype == 'float32':
        dtype = np.float32
    elif dtype == 'float64':
        dtype = np.float64
    else:
        raise ValueError("Invalid dtype")

    auth = default_auth()

    model = overthrust_model_density(Blob("models", model_filename, auth=auth), datakey="m",
                                     dtype=dtype, space_order=so, nbl=nbl)

    create_container(shots_container, auth=auth)

    client = setup_dask()

    solver_params = {'tn': tn, 'space_order': so, 'dtype': dtype, 'datakey': 'm', 'nbl': nbl,
                     'water_depth': 20, 'calculate_density': True, 'kernel': kernel,
                     'h5_file': Blob("models", model_filename, auth=auth)}

    src_coords = get_source_locations(model, nshots, dtype)

    print("Generating shots")

    futures = []
    for i in range(nshots):
        futures.append(client.submit(generate_shot, (i, src_coords[i]), solver_params=solver_params,
                                     container=shots_container, auth=auth, resources={'tasks': 1}))

    wait(futures)

    results = [f.result() for f in futures]

    if all(results):
        print("Successfully generated %d shots and uploaded to blob storage container %s"
              % (nshots, shots_container))
    else:
        raise Exception("Some shots failed to generate. Please check the remote worker logs"
                        " (logs are not currently forwarded to the local system)")
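# Hedged sketch (not part of the original workflow): for debugging a single shot locally, the same
# generate_shot call that is submitted to Dask above could presumably be invoked directly. The
# argument layout mirrors the client.submit call; whether generate_shot works outside a Dask worker
# is an assumption.
def generate_shots_locally(nshots, src_coords, solver_params, shots_container, auth):
    results = []
    for i in range(nshots):
        # Same arguments as in client.submit(generate_shot, ...) above, minus the Dask-only
        # resources={'tasks': 1} annotation.
        results.append(generate_shot((i, src_coords[i]), solver_params=solver_params,
                                     container=shots_container, auth=auth))
    return results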
def test_gradientFWI(self, auth):
    true_model_filename = "overthrust_3D_true_model_2D.h5"
    initial_model_filename = "overthrust_3D_initial_model_2D.h5"
    tn = 4000
    dtype = np.float32
    so = 16
    nbl = 40
    shot_id = 20
    shots_container = "shots-iso-40-nbl-40-so-16"

    model0 = overthrust_model_iso(Blob("models", initial_model_filename, auth=auth), datakey="m0",
                                  dtype=dtype, space_order=so, nbl=nbl)

    model_t = overthrust_model_iso(Blob("models", true_model_filename, auth=auth), datakey="m",
                                   dtype=dtype, space_order=so, nbl=nbl)

    rec, source_location, _ = load_shot(shot_id, auth, container=shots_container)

    solver_params = {'h5_file': Blob("models", initial_model_filename, auth=auth), 'tn': tn,
                     'space_order': so, 'dtype': dtype, 'datakey': 'm0', 'nbl': nbl,
                     'src_coordinates': source_location}

    solver = overthrust_solver_iso(**solver_params)

    v = model_t.vp.data
    v0 = model0.vp
    dm = np.float64(v**(-2) - v0.data**(-2))

    F0, gradient = process_shot(shot_id, solver, shots_container, auth, exclude_boundaries=False)

    basic_gradient_test(solver, so, v0.data, v, rec, F0, gradient, dm)
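# Hedged illustration (toy values, not from the repository): the perturbation dm used in the gradient
# test above is the difference in squared slowness m = 1 / v**2 between the true and initial velocity
# models, promoted to float64.
import numpy as np

v_true_toy = np.full((5, 5), 3.0, dtype=np.float32)   # toy "true" velocities
v_init_toy = np.full((5, 5), 2.5, dtype=np.float32)   # toy "initial" velocities
dm_toy = np.float64(v_true_toy ** (-2) - v_init_toy ** (-2))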
def run(initial_model_filename, results_dir, tn, nshots, shots_container, so, nbl, kernel,
        scale_gradient, shot_number):
    dtype = np.float64
    water_depth = 22  # Number of points at the top of the domain that correspond to water
    exclude_boundaries = True  # Exclude the boundary regions from the optimisation problem

    initial_model_filename, datakey = initial_model_filename

    model, geometry, bounds = initial_setup(initial_model_filename, tn, dtype, so, nbl, datakey=datakey,
                                            exclude_boundaries=exclude_boundaries,
                                            water_depth=water_depth)

    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    print(initial_model_filename)

    solver_params = {'h5_file': Blob("models", initial_model_filename), 'tn': tn, 'space_order': so,
                     'dtype': dtype, 'datakey': datakey, 'nbl': nbl}

    print(solver_params)

    solver = overthrust_solver_iso(**solver_params)
    solver._dt = 1.75
    solver.geometry.resample(1.75)

    process_shot(shot_number, solver, shots_container, exclude_boundaries)
def test_shot(shot_id, shots_container, exclude_boundaries):
    initial_model_filename = "overthrust_3D_initial_model_2D.h5"
    tn = 4000
    dtype = np.float32
    so = 6
    nbl = 40
    exclude_boundaries = True
    shot_id = 1

    solver_params = {'h5_file': Blob("models", initial_model_filename), 'tn': tn, 'space_order': so,
                     'dtype': dtype, 'datakey': 'm0', 'nbl': nbl,
                     'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}

    solver1 = overthrust_solver_iso(**solver_params)

    o1, grad1 = process_shot(shot_id, solver1, shots_container, exclude_boundaries)

    client = setup_dask()

    future = client.submit(process_shot, shot_id, solver1, shots_container, exclude_boundaries)

    wait(future)

    o2, grad2 = future.result()

    assert np.allclose(grad1, grad2, atol=0., rtol=0.)
    assert o1 == o2
def test_equivalence_shot_checkpointing(shots_container, auth):
    initial_model_filename = "overthrust_3D_initial_model_2D.h5"
    tn = 4000
    dtype = np.float32
    so = 6
    nbl = 40
    exclude_boundaries = True
    water_depth = 20
    shot_id = 1

    solver_params = {'h5_file': Blob("models", initial_model_filename, auth=auth), 'tn': tn,
                     'space_order': so, 'dtype': dtype, 'datakey': 'm0', 'nbl': nbl,
                     'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}

    solver1 = overthrust_solver_iso(**solver_params)
    solver2 = overthrust_solver_iso(**solver_params)

    model, geometry, _ = initial_setup(Blob("models", initial_model_filename, auth=auth), tn, dtype,
                                       so, nbl, datakey="m0", exclude_boundaries=exclude_boundaries,
                                       water_depth=water_depth)

    o2, grad2 = process_shot(shot_id, solver1, shots_container, auth, exclude_boundaries)
    o1, grad1 = process_shot_checkpointed(shot_id, solver2, shots_container, auth, exclude_boundaries)

    np.testing.assert_approx_equal(o1, o2, significant=5)
    assert np.allclose(grad1, grad2, rtol=1e-4)
def upload_with_progressbar(filename, container, blob_name):
    with tqdm(total=100) as pbar:
        def update(done, total):
            # Convert the cumulative (done, total) reported by the upload into a 0-100 percentage
            # and advance the bar by the delta since the last call.
            pbar.update(done * 100 / total - pbar.n)

        upload_file_to_blob(filename, Blob(container, filename.split("/")[-1], auth=default_auth()),
                            progress_callback=update)
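# Hedged demonstration (synthetic values, no upload involved): the callback above assumes the upload
# reports cumulative (done, total) byte counts. The toy loop below exercises the same
# percentage-delta conversion against a tqdm bar with total=100.
from tqdm import tqdm

def _demo_progress(chunks=10, total_bytes=1000):
    with tqdm(total=100) as pbar:
        for i in range(1, chunks + 1):
            done = i * total_bytes // chunks
            pbar.update(done * 100 / total_bytes - pbar.n)

# _demo_progress()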
def test_equivalence_local_remote_single_shot(shots_container):
    initial_model_filename, tn, dtype, so, nbl = "overthrust_3D_initial_model_2D.h5", 4000, np.float32, 6, 40

    model, _, bounds = initial_setup(filename=Blob("models", initial_model_filename), tn=tn,
                                     dtype=dtype, space_order=so, nbl=nbl)

    solver_params = {'h5_file': Blob("models", initial_model_filename), 'tn': tn, 'space_order': so,
                     'dtype': dtype, 'datakey': 'm0', 'nbl': nbl}

    solver = overthrust_solver_iso(**solver_params)

    v0 = mat2vec(model.vp.data).astype(np.float64)

    local_results = fwi_gradient_local(v0, 1, solver, shots_container)

    client = setup_dask()

    remote_results = fwi_gradient(v0, 1, client, solver, shots_container, exclude_boundaries=False,
                                  scale_gradient=False, mute_water=False)

    np.testing.assert_approx_equal(local_results[0], remote_results[0])
    np.testing.assert_array_almost_equal(local_results[1], remote_results[1])
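# Hedged aside: mat2vec/vec2mat are assumed to be the usual flatten/reshape pair used to move between
# the 2-D model grid and the 1-D vector the optimiser works on. A plain-numpy analogue of that round
# trip (toy shape, nothing loaded from blob storage):
import numpy as np

m_toy = np.arange(12, dtype=np.float32).reshape(3, 4)
v_toy = m_toy.reshape(-1).astype(np.float64)      # analogous to mat2vec(...)
m_back = v_toy.reshape(m_toy.shape)               # analogous to vec2mat(v, shape)
assert np.allclose(m_toy, m_back)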
def solver_params(model, auth, tn, so, dtype, nbl):
    initial_model_filename, datakey = model.split(":")
    return {'h5_file': Blob("models", initial_model_filename, auth=auth), 'tn': tn, 'space_order': so,
            'dtype': dtype, 'datakey': datakey, 'nbl': nbl,
            'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}
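# Hedged usage sketch: the "file:datakey" string follows the split(":") convention above, and the
# concrete values mirror the other tests in this file. Whether default_auth() is importable in this
# module is an assumption; running this also requires access to the "models" container.
# params = solver_params("overthrust_3D_initial_model_2D.h5:m0", default_auth(), tn=4000, so=6,
#                        dtype=np.float32, nbl=40)
# solver = overthrust_solver_iso(**params)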
def test_overthrust_solver_iso(kernel, tn, src_coordinates, space_order, nbl, dtype, auth):
    filename = "overthrust_3D_true_model_2D.h5"
    src_coordinates = np.array(src_coordinates)
    solver = overthrust_solver_iso(Blob("models", filename, auth=auth), kernel, tn, src_coordinates,
                                   space_order, "m", nbl, dtype)
    assert solver.kernel == kernel
    assert solver.geometry.tn == tn
    assert np.array_equal(solver.geometry.src_positions[0], src_coordinates)
    assert solver.space_order == space_order
    assert solver.model.nbl == nbl
    assert solver.model.dtype == dtype
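# Hedged note: judging by the positional call above, overthrust_solver_iso is assumed to take, in
# order, (h5_file, kernel, tn, src_coordinates, space_order, datakey, nbl, dtype). Spelled with the
# keyword names used elsewhere in this repository, the same call would read roughly:
# solver = overthrust_solver_iso(Blob("models", filename, auth=auth), kernel=kernel, tn=tn,
#                                src_coordinates=src_coordinates, space_order=space_order,
#                                datakey="m", nbl=nbl, dtype=dtype)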
def run(initial_model_filename, results_dir, tn, nshots, shots_container, so, nbl, kernel,
        scale_gradient, max_iter, checkpointing, n_checkpoints, compression, tolerance,
        reference_solution, dtype):
    if dtype == 'float32':
        dtype = np.float32
    elif dtype == 'float64':
        dtype = np.float64
    else:
        raise ValueError("Invalid dtype")

    shot_id = 20
    water_depth = 20
    initial_model_filename, datakey = initial_model_filename

    rec, source_location, _ = load_shot(shot_id, container=shots_container)

    print("Source", source_location)
    print("rec", np.linalg.norm(rec))

    solver_params = {'h5_file': Blob("models", initial_model_filename), 'tn': tn, 'space_order': so,
                     'dtype': dtype, 'datakey': datakey, 'nbl': nbl,
                     'src_coordinates': source_location,
                     'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}

    if kernel in ['OT2', 'OT4']:
        solver_params['kernel'] = kernel
        solver = overthrust_solver_iso(**solver_params)
    elif kernel == "rho":
        solver_params['water_depth'] = water_depth
        solver_params['calculate_density'] = False
        solver = overthrust_solver_density(**solver_params)

    if not checkpointing:
        F0, gradient = process_shot(shot_id, solver, shots_container, exclude_boundaries=False)
    else:
        F0, gradient = process_shot_checkpointed(shot_id, solver, shots_container,
                                                 exclude_boundaries=False,
                                                 checkpoint_params={'n_checkpoints': n_checkpoints,
                                                                    'scheme': compression,
                                                                    'tolerance': tolerance})

    error1, error2, H = gradient_test_errors(solver, rec, F0, gradient)

    data = dict(zip(H, error2))
    data['compression'] = compression
    data['tolerance'] = tolerance

    write_results(data, "linearization.csv")
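# Hedged illustration of the quantity gradient_test_errors is assumed to measure: for a smooth
# objective F, the linearization residual |F(m + h*dm) - F(m) - h*<grad F(m), dm>| should decay like
# O(h**2), i.e. roughly quarter each time h halves. Toy scalar-field example, unrelated to the
# solvers above:
import numpy as np

def F_toy(m):
    return 0.5 * np.sum(np.sin(m) ** 2)

def gradF_toy(m):
    return np.sin(m) * np.cos(m)

rng = np.random.default_rng(0)
m0_toy, dm_dir = rng.random(50), rng.random(50)
H_toy = [2.0 ** -k for k in range(1, 7)]
err2 = [abs(F_toy(m0_toy + h * dm_dir) - F_toy(m0_toy) - h * gradF_toy(m0_toy).dot(dm_dir))
        for h in H_toy]
rates = [np.log2(err2[i] / err2[i + 1]) for i in range(len(err2) - 1)]
# rates should cluster around 2, confirming second-order behaviour of the residual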
def run(initial_model_filename, results_dir, tn, nshots, shots_container, so, nbl, kernel,
        scale_gradient, max_iter, checkpointing, n_checkpoints, compression, tolerance,
        reference_solution, dtype):
    if dtype == 'float32':
        dtype = np.float32
    elif dtype == 'float64':
        dtype = np.float64
    else:
        raise ValueError("Invalid dtype")

    water_depth = 20  # Number of points at the top of the domain that correspond to water
    exclude_boundaries = True  # Exclude the boundary regions from the optimisation problem
    mute_water = True  # Mute the gradient in the water region

    initial_model_filename, datakey = initial_model_filename

    model, geometry, bounds = initial_setup(initial_model_filename, tn, dtype, so, nbl, datakey=datakey,
                                            exclude_boundaries=exclude_boundaries,
                                            water_depth=water_depth)

    client = setup_dask()

    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    intermediates_dir = os.path.join(results_dir, "intermediates")
    if not os.path.exists(intermediates_dir):
        os.mkdir(intermediates_dir)

    progress_dir = os.path.join(results_dir, "progress")
    if not os.path.exists(progress_dir):
        os.mkdir(progress_dir)

    auth = default_auth()

    solver_params = {'h5_file': Blob("models", initial_model_filename, auth=auth), 'tn': tn,
                     'space_order': so, 'dtype': dtype, 'datakey': datakey, 'nbl': nbl,
                     'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}

    if kernel in ['OT2', 'OT4']:
        solver_params['kernel'] = kernel
        solver = overthrust_solver_iso(**solver_params)
    elif kernel == "rho":
        solver_params['water_depth'] = water_depth
        solver_params['calculate_density'] = False
        solver = overthrust_solver_density(**solver_params)

    solver._dt = 1.75
    solver.geometry.resample(1.75)

    f_args = [nshots, client, solver, shots_container, auth, scale_gradient, mute_water,
              exclude_boundaries, water_depth]

    if checkpointing:
        f_args += [checkpointing, {'n_checkpoints': n_checkpoints, 'scheme': compression,
                                   'tolerance': tolerance}]

    if exclude_boundaries:
        v0 = mat2vec(trim_boundary(model.vp, model.nbl)).astype(np.float64)
    else:
        v0 = mat2vec(model.vp).astype(np.float64)

    def callback(progress_dir, intermediates_dir, model, exclude_boundaries, vec):
        global plot_model_to_file
        callback.call_count += 1

        if not hasattr(callback, "obj_fn_history"):
            callback.obj_fn_history = []

        callback.obj_fn_history.append(fwi_gradient.obj_fn_cache[vec.tobytes()])

        fwi_iteration = callback.call_count
        filename = os.path.join(intermediates_dir, "solution%d.h5" % fwi_iteration)
        if exclude_boundaries:
            to_hdf5(vec2mat(vec, model.shape), filename)
        else:
            to_hdf5(vec2mat(vec, model.vp.shape), filename)

        progress_filename = os.path.join(progress_dir, "fwi-iter%d.pdf" % (fwi_iteration))
        plot_model_to_file(solver.model, progress_filename)

    callback.call_count = 0

    partial_callback = partial(callback, progress_dir, intermediates_dir, model, exclude_boundaries)

    fwi_gradient.call_count = 0
    fwd_op = solver.op_fwd(save=False)
    rev_op = solver.op_grad(save=False)
    fwd_op.ccode
    rev_op.ccode

    solution_object = minimize(fwi_gradient, v0, args=tuple(f_args), jac=True, method='L-BFGS-B',
                               callback=partial_callback, bounds=bounds,
                               options={'disp': True, 'maxiter': max_iter})

    if exclude_boundaries:
        final_model = vec2mat(solution_object.x, model.shape)
    else:
        final_model = vec2mat(solution_object.x, model.vp.shape)

    solver.model.update("vp", final_model)

    # Save plot of final model
    final_plot_filename = os.path.join(results_dir, "final_model.pdf")
    plot_model_to_file(solver.model, final_plot_filename)

    # Save objective function values to CSV
    obj_fn_history = callback.obj_fn_history
    obj_fn_vals_filename = os.path.join(results_dir, "objective_function_values.csv")
    with open(obj_fn_vals_filename, "w") as vals_file:
        vals_writer = csv.writer(vals_file)
        for r in obj_fn_history:
            vals_writer.writerow([r])

    # Plot objective function values
    plt.title("FWI convergence")
    plt.xlabel("Iteration number")
    plt.ylabel("Objective function value")
    obj_fun_plt_filename = os.path.join(results_dir, "convergence.tex")
    obj_fun_plt_pdf_filename = os.path.join(results_dir, "convergence.pdf")

    plt.clf()
    if reference_solution is None:
        plt.plot(obj_fn_history)
    else:
        # Load reference solution convergence history
        with open(reference_solution, 'r') as reference_file:
            vals_reader = csv.reader(reference_file)
            reference_solution_values = []
            for r in vals_reader:
                reference_solution_values.append(float(r[0]))
        # Plot with legends
        plt.plot(obj_fn_history, label="Lossy FWI")
        plt.plot(reference_solution_values, label="Reference FWI")
        # Display legend
        plt.legend()

    plt.savefig(obj_fun_plt_pdf_filename)
    tikzplotlib.save(obj_fun_plt_filename)

    true_model = overthrust_model_iso("overthrust_3D_true_model_2D.h5", datakey="m", dtype=dtype,
                                      space_order=so, nbl=nbl)
    true_model_vp = trim_boundary(true_model.vp, true_model.nbl)

    error_norm = np.linalg.norm(true_model_vp - final_model)
    print("L2 norm of final solution vs true solution: %f" % error_norm)

    data = {'error_norm': error_norm, 'checkpointing': checkpointing, 'compression': compression,
            'tolerance': tolerance, 'ncp': n_checkpoints}
    write_results(data, "fwi_experiment.csv")

    # Final solution
    final_solution_filename = os.path.join(results_dir, "final_solution.h5")
    to_hdf5(final_model, final_solution_filename)
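# Hedged sketch: re-loading the per-iteration objective values written above, e.g. to re-plot the
# convergence curve without re-running the inversion. The path is illustrative, not fixed by the code.
import csv

def load_objective_history(path):
    with open(path) as f:
        return [float(row[0]) for row in csv.reader(f) if row]

# history = load_objective_history("results/objective_function_values.csv")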
from fwi.overthrust import overthrust_solver_iso, overthrust_model_iso, overthrust_solver_density
from fwi.io import Blob
from examples.seismic import plot_shotrecord

import numpy as np
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt  # noqa: E402

initial_model_filename = "overthrust_3D_initial_model_2D.h5"
true_model_filename = "overthrust_3D_true_model_2D.h5"
so = 16
nbl = 40
dtype = np.float32
tn = 4000
dt = 1.75

true_model = overthrust_model_iso(Blob("models", true_model_filename), datakey="m", space_order=so,
                                  nbl=nbl, dtype=dtype)
initial_model = overthrust_model_iso(Blob("models", initial_model_filename), datakey="m0",
                                     space_order=so, nbl=nbl, dtype=dtype)

true_model_solver_density = overthrust_solver_density(Blob("models", true_model_filename), datakey="m",
                                                      tn=tn, space_order=so, nbl=nbl, dtype=dtype)
true_model_solver_iso = overthrust_solver_iso(Blob("models", true_model_filename), datakey="m", tn=tn,
                                              space_order=so, nbl=nbl, dtype=dtype)
initial_model_solver_iso = overthrust_solver_iso(Blob("models", initial_model_filename), datakey="m0",
                                                 tn=tn, space_order=so, nbl=nbl, dtype=dtype)

# True with density
rec_true_dens, _, _ = true_model_solver_density.forward(dt=dt)
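# Hedged sketch: plotting and saving the shot record computed above. Devito's plot_shotrecord is
# assumed here to take (data, model, t0, tn); the output filename is illustrative.
# plot_shotrecord(rec_true_dens.data, true_model, 0, tn)
# plt.savefig("shot_record_true_density.pdf")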
def test_equivalence_checkpointing(shots_container, exclude_boundaries, scale_gradient, mute_water):
    initial_model_filename = "overthrust_3D_initial_model_2D.h5"
    tn = 4000
    dtype = np.float32
    so = 6
    nbl = 40
    water_depth = 20
    nshots = 1
    client = setup_dask()

    solver_params = {'h5_file': Blob("models", initial_model_filename), 'tn': tn, 'space_order': so,
                     'dtype': dtype, 'datakey': 'm0', 'nbl': nbl}

    solver = overthrust_solver_iso(**solver_params)

    model, geometry, _ = initial_setup(initial_model_filename, tn, dtype, so, nbl, datakey="m0",
                                       exclude_boundaries=exclude_boundaries, water_depth=water_depth)

    if exclude_boundaries:
        v0 = mat2vec(np.array(trim_boundary(model.vp, model.nbl))).astype(np.float64)
    else:
        v0 = mat2vec(model.vp.data).astype(np.float64)

    o1, grad1 = fwi_gradient(v0, nshots, client, solver, shots_container, scale_gradient, mute_water,
                             exclude_boundaries, water_depth, checkpointing=True)
    o2, grad2 = fwi_gradient(v0, nshots, client, solver, shots_container, scale_gradient, mute_water,
                             exclude_boundaries, water_depth)

    print(o1, np.linalg.norm(grad1), grad1.shape)
    print(o2, np.linalg.norm(grad2), grad2.shape)

    grad1_diag = [grad1[k, k] for k in range(40)]
    grad2_diag = [grad2[k, k] for k in range(40)]

    print(grad1_diag)
    print(grad2_diag)

    np.testing.assert_approx_equal(o1, o2, significant=5)
    # Note: assert_array_almost_equal takes `decimal`, not `significant`
    np.testing.assert_array_almost_equal(grad1, grad2, decimal=5)