Example #1
def callback(final_solution_basename, vec):
    # Progress callback for scipy.optimize.minimize; `profiler`, `model`,
    # `to_hdf5` and `vec2mat` come from the enclosing scope (see Example #7).
    callback.call_count += 1
    fwi_iteration = callback.call_count
    filename = "%s_%d.h5" % (final_solution_basename, fwi_iteration)
    with profiler.get_timer('io', 'write_progress'):
        to_hdf5(vec2mat(vec, model.shape), filename)
    print(profiler.summary())
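
Several of these examples convert between the optimiser's flat vector and the gridded model via mat2vec and vec2mat, which the snippets use but never define. A minimal sketch, assuming they are plain flatten/reshape wrappers over NumPy:

import numpy as np


def mat2vec(mat):
    # Flatten the gridded model into the 1D vector L-BFGS-B operates on.
    return np.ravel(mat)


def vec2mat(vec, shape):
    # Reshape the optimiser's flat vector back onto the model grid.
    return np.reshape(vec, shape)
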
Example #2
def compare_error(space_order=4,
                  ncp=None,
                  kernel='OT4',
                  nbpml=40,
                  filename='',
                  compression_params=None,
                  **kwargs):
    # Avoid a mutable default argument: this function mutates
    # compression_params below, which would leak state across calls.
    if compression_params is None:
        compression_params = {}
    grad, wrp, fw_timings, rev_timings = checkpointed_run(
        space_order, ncp, kernel, nbpml, filename, compression_params,
        **kwargs)
    print(wrp.profiler.summary())

    compression_params['scheme'] = None

    print("*************************")
    print("Starting uncompressed run:")

    grad2, wrp2, fw_timings2, rev_timings2 = checkpointed_run(
        space_order, ncp, kernel, nbpml, filename, compression_params,
        **kwargs)

    error_field = grad2.data - grad.data

    print("compression enabled norm", np.linalg.norm(grad.data))
    print("compression disabled norm", np.linalg.norm(grad2.data))
    to_hdf5(error_field, 'zfp_grad_errors_full.h5')
    print("Error norm", np.linalg.norm(error_field))
Example #3
    def callback(progress_dir, intermediates_dir, model, exclude_boundaries, vec):
        # `fwi_gradient`, `solver` and `plot_model_to_file` are resolved from
        # the enclosing scope (this callback is defined inside Example #5's run()).
        global plot_model_to_file
        callback.call_count += 1

        if not hasattr(callback, "obj_fn_history"):
            callback.obj_fn_history = []

        callback.obj_fn_history.append(fwi_gradient.obj_fn_cache[vec.tobytes()])

        fwi_iteration = callback.call_count
        filename = os.path.join(intermediates_dir, "solution%d.h5" % fwi_iteration)
        if exclude_boundaries:
            to_hdf5(vec2mat(vec, model.shape), filename)
        else:
            to_hdf5(vec2mat(vec, model.vp.shape), filename)

        progress_filename = os.path.join(progress_dir, "fwi-iter%d.pdf" % fwi_iteration)
        plot_model_to_file(solver.model, progress_filename)
Example #4
def run(space_order=4, kernel='OT4', nbpml=40, filename='', **kwargs):
    if kernel in ['OT2', 'OT4']:
        solver = overthrust_setup(filename=filename,
                                  nbpml=nbpml,
                                  space_order=space_order,
                                  kernel=kernel,
                                  **kwargs)
    elif kernel == 'TTI':
        solver = overthrust_setup_tti(filename=filename,
                                      nbpml=nbpml,
                                      space_order=space_order,
                                      kernel=kernel,
                                      **kwargs)
    else:
        raise ValueError("Unsupported kernel: %s" % kernel)

    return_values = solver.forward(save=False)

    u = return_values[1]  # the forward wavefield
    uncompressed = u.data[0]
    to_hdf5(uncompressed, "uncompressed.h5")
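
to_hdf5 appears in every example but is defined elsewhere in the codebase. Examples #6 and #8 read the field back from a dataset named 'data', so a minimal sketch consistent with that read side, assuming a single-dataset layout:

import h5py


def to_hdf5(data, filename, datakey='data'):
    # Store one array per file under a fixed dataset name,
    # matching the f['data'] reads in Examples #6 and #8.
    with h5py.File(filename, 'w') as f:
        f.create_dataset(datakey, data=data)
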
Example #5
def run(initial_model_filename, results_dir, tn, nshots, shots_container, so, nbl, kernel, scale_gradient, max_iter,
        checkpointing, n_checkpoints, compression, tolerance, reference_solution, dtype):

    if dtype == 'float32':
        dtype = np.float32
    elif dtype == 'float64':
        dtype = np.float64
    else:
        raise ValueError("Invalid dtype")

    water_depth = 20  # Number of points at the top of the domain that correspond to water
    exclude_boundaries = True  # Exclude the boundary regions from the optimisation problem
    mute_water = True  # Mute the gradient in the water region

    initial_model_filename, datakey = initial_model_filename  # arrives as a (filename, datakey) pair

    model, geometry, bounds = initial_setup(initial_model_filename, tn, dtype, so, nbl,
                                            datakey=datakey, exclude_boundaries=exclude_boundaries, water_depth=water_depth)

    client = setup_dask()

    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    intermediates_dir = os.path.join(results_dir, "intermediates")

    if not os.path.exists(intermediates_dir):
        os.mkdir(intermediates_dir)

    progress_dir = os.path.join(results_dir, "progress")

    if not os.path.exists(progress_dir):
        os.mkdir(progress_dir)

    auth = default_auth()

    solver_params = {'h5_file': Blob("models", initial_model_filename, auth=auth), 'tn': tn,
                     'space_order': so, 'dtype': dtype, 'datakey': datakey, 'nbl': nbl,
                     'opt': ('noop', {'openmp': True, 'par-dynamic-work': 1000})}

    if kernel in ['OT2', 'OT4']:
        solver_params['kernel'] = kernel
        solver = overthrust_solver_iso(**solver_params)
    elif kernel == "rho":
        solver_params['water_depth'] = water_depth
        solver_params['calculate_density'] = False
        solver = overthrust_solver_density(**solver_params)
    solver._dt = 1.75
    solver.geometry.resample(1.75)

    f_args = [nshots, client, solver, shots_container, auth, scale_gradient, mute_water, exclude_boundaries, water_depth]

    if checkpointing:
        f_args += [checkpointing, {'n_checkpoints': n_checkpoints, 'scheme': compression,
                                   'tolerance': tolerance}]
    if exclude_boundaries:
        v0 = mat2vec(trim_boundary(model.vp, model.nbl)).astype(np.float64)
    else:
        v0 = mat2vec(model.vp).astype(np.float64)

    def callback(progress_dir, intermediates_dir, model, exclude_boundaries, vec):
        # `fwi_gradient`, `solver` and `plot_model_to_file` are resolved from
        # the enclosing scope.
        global plot_model_to_file
        callback.call_count += 1

        if not hasattr(callback, "obj_fn_history"):
            callback.obj_fn_history = []

        callback.obj_fn_history.append(fwi_gradient.obj_fn_cache[vec.tobytes()])

        fwi_iteration = callback.call_count
        filename = os.path.join(intermediates_dir, "solution%d.h5" % fwi_iteration)
        if exclude_boundaries:
            to_hdf5(vec2mat(vec, model.shape), filename)
        else:
            to_hdf5(vec2mat(vec, model.vp.shape), filename)

        progress_filename = os.path.join(progress_dir, "fwi-iter%d.pdf" % fwi_iteration)
        plot_model_to_file(solver.model, progress_filename)

    callback.call_count = 0

    partial_callback = partial(callback, progress_dir, intermediates_dir, model, exclude_boundaries)

    fwi_gradient.call_count = 0
    fwd_op = solver.op_fwd(save=False)
    rev_op = solver.op_grad(save=False)
    # Touching .ccode forces Devito to generate the operator code up front,
    # so it does not happen during the first objective-function evaluation.
    fwd_op.ccode
    rev_op.ccode

    solution_object = minimize(fwi_gradient,
                               v0,
                               args=tuple(f_args),
                               jac=True, method='L-BFGS-B',
                               callback=partial_callback, bounds=bounds,
                               options={'disp': True, 'maxiter': max_iter})

    if exclude_boundaries:
        final_model = vec2mat(solution_object.x, model.shape)
    else:
        final_model = vec2mat(solution_object.x, model.vp.shape)

    solver.model.update("vp", final_model)

    # Save plot of final model
    final_plot_filename = os.path.join(results_dir, "final_model.pdf")
    plot_model_to_file(solver.model, final_plot_filename)

    # Save objective function values to CSV
    obj_fn_history = callback.obj_fn_history
    obj_fn_vals_filename = os.path.join(results_dir, "objective_function_values.csv")
    with open(obj_fn_vals_filename, "w") as vals_file:
        vals_writer = csv.writer(vals_file)
        for r in obj_fn_history:
            vals_writer.writerow([r])

    # Plot objective function values
    plt.title("FWI convergence")
    plt.xlabel("Iteration number")
    plt.ylabel("Objective function value")
    obj_fun_plt_filename = os.path.join(results_dir, "convergence.tex")
    obj_fun_plt_pdf_filename = os.path.join(results_dir, "convergence.pdf")

    plt.clf()
    if reference_solution is None:
        plt.plot(obj_fn_history)
    else:
        # Load reference solution convergence history
        with open(reference_solution, 'r') as reference_file:
            vals_reader = csv.reader(reference_file)
            reference_solution_values = []
            for r in vals_reader:
                reference_solution_values.append(float(r[0]))
        # Pad with 0s so both series span the same number of iterations
        max_len = max(len(obj_fn_history), len(reference_solution_values))
        obj_fn_history += [0] * (max_len - len(obj_fn_history))
        reference_solution_values += [0] * (max_len - len(reference_solution_values))

        # Plot with legends
        plt.plot(obj_fn_history, label="Lossy FWI")
        plt.plot(reference_solution_values, label="Reference FWI")
        # Display legend
        plt.legend()
    plt.savefig(obj_fun_plt_pdf_filename)
    tikzplotlib.save(obj_fun_plt_filename)

    true_model = overthrust_model_iso("overthrust_3D_true_model_2D.h5", datakey="m", dtype=dtype, space_order=so, nbl=nbl)
    true_model_vp = trim_boundary(true_model.vp, true_model.nbl)

    error_norm = np.linalg.norm(true_model_vp - final_model)
    print("L2 norm of final solution vs true solution: %f" % error_norm)

    data = {'error_norm': error_norm,
            'checkpointing': checkpointing,
            'compression': compression,
            'tolerance': tolerance,
            'ncp': n_checkpoints}

    write_results(data, "fwi_experiment.csv")

    # Final solution
    final_solution_filename = os.path.join(results_dir, "final_solution.h5")
    to_hdf5(final_model, final_solution_filename)
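
Both FWI drivers end by handing a dict of run parameters and the error norm to write_results, which is not shown. A minimal sketch, assuming it appends one CSV row per run and writes a header only when creating the file:

import csv
import os


def write_results(data, filename):
    # Append one result row per experiment run.
    new_file = not os.path.exists(filename)
    with open(filename, 'a', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=sorted(data.keys()))
        if new_file:
            writer.writeheader()
        writer.writerow(data)
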
Example #6
with h5py.File(filename, 'r') as f:
    uncompressed = f['data'][()].astype(np.dtype('float64'))
print(
    "\"Size of compressed field\", \"Compression Factor\", \"Compression time\", \"Decompression time\", \"Tolerance\", \"Error norm\", \"Maximum error\""
)
for p_i in range(17):  # 17 values so the p_i == 16 error dump below is reached
    tolerance = 0.1**p_i
    with Timer(factor=1000) as t:
        if compressor == "zfp":
            kwargs = {'parallel': parallel, 'tolerance': tolerance}
        else:
            kwargs = {'tolerance': tolerance}
        compressed = compress(uncompressed, **kwargs)

    with Timer(factor=1000) as t2:
        if compressor == "zfp":
            kwargs = {'parallel': parallel, 'tolerance': tolerance}
        else:
            kwargs = {}
        decompressed = decompress(compressed, uncompressed.shape,
                                  uncompressed.dtype, **kwargs)

    #to_hdf5(decompressed, "decompressed-t-%d.h5"%p_i)
    error_matrix = decompressed - uncompressed
    if p_i in (0, 8, 16):
        to_hdf5(error_matrix, "error_field-%s-%d.h5" % (compressor, p_i))
    print("%f, %f, %f, %f, %.16f, %f, %f" %
          (len(compressed), len(uncompressed.tostring()) /
           float(len(compressed)), t.elapsed, t2.elapsed, tolerance,
           np.linalg.norm(error_matrix), np.max(error_matrix)))
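
The compress and decompress wrappers above accept tolerance, precision and parallel keywords, which matches the pyzfp API; the import is not shown in the snippet, so treat pyzfp as an assumption. A minimal sketch of the fixed-accuracy round trip using pyzfp directly:

import numpy as np
import pyzfp

field = np.random.rand(100, 100).astype(np.float64)

# Fixed-accuracy mode: zfp bounds the absolute reconstruction error by tolerance.
compressed = pyzfp.compress(field, tolerance=1e-6, parallel=True)
decompressed = pyzfp.decompress(compressed, field.shape, field.dtype,
                                tolerance=1e-6)

print("max abs error:", np.max(np.abs(decompressed - field)))
print("compression factor:", field.nbytes / float(len(compressed)))
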
Example #7
def run(initial_model_filename, final_solution_basename, tn, nshots,
        shots_container, so, nbl, kernel, checkpointing, n_checkpoints,
        compression, tolerance):
    dtype = np.float32
    model, geometry, bounds = initial_setup(initial_model_filename, tn, dtype,
                                            so, nbl)

    solver_params = {
        'filename': initial_model_filename,
        'tn': tn,
        'space_order': so,
        'dtype': dtype,
        'datakey': 'm0',
        'nbl': nbl,
        'origin': model.origin,
        'spacing': model.spacing,
        'shots_container': shots_container
    }

    client = setup_dask()

    f_args = [model, geometry, nshots, client, solver_params]

    # if checkpointing:
    #    f_g = fwi_gradient_checkpointed
    #    compression_params = {'scheme': compression, 'tolerance': 10**(-tolerance)}
    #    f_args.append(n_checkpoints)
    #    f_args.append(compression_params)
    # else:

    f_g = fwi_gradient

    clipped_vp = mat2vec(clip_boundary_and_numpy(model.vp.data, model.nbl))

    def callback(final_solution_basename, vec):
        callback.call_count += 1
        fwi_iteration = callback.call_count
        filename = "%s_%d.h5" % (final_solution_basename, fwi_iteration)
        with profiler.get_timer('io', 'write_progress'):
            to_hdf5(vec2mat(vec, model.shape), filename)
        print(profiler.summary())

    callback.call_count = 0

    partial_callback = partial(callback, final_solution_basename)

    solution_object = minimize(f_g,
                               clipped_vp,
                               args=tuple(f_args),
                               jac=True,
                               method='L-BFGS-B',
                               callback=partial_callback,
                               bounds=bounds,
                               options={
                                   'disp': True,
                                   'maxiter': 60
                               })

    final_model = vec2mat(solution_object.x, model.shape)

    true_model = overthrust_model_iso("overthrust_3D_true_model_2D.h5",
                                      datakey="m",
                                      dtype=dtype,
                                      space_order=so,
                                      nbl=nbl)
    true_model_vp = clip_boundary_and_numpy(true_model.vp.data, true_model.nbl)

    error_norm = np.linalg.norm(true_model_vp - final_model)
    print("L2 norm of final solution vs true solution: %f" % error_norm)

    data = {
        'error_norm': error_norm,
        'checkpointing': checkpointing,
        'compression': compression,
        'tolerance': tolerance,
        'ncp': n_checkpoints
    }

    write_results(data, "fwi_experiment.csv")

    to_hdf5(final_model, '%s_final.h5' % final_solution_basename)
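
trim_boundary (Example #5) and clip_boundary_and_numpy (Example #7) both strip the nbl-point absorbing-boundary halo before the model enters the optimiser. Neither helper is shown; a minimal sketch consistent with the call sites, assuming a symmetric halo on every side of every dimension:

import numpy as np


def clip_boundary_and_numpy(field, nbl):
    # Drop nbl absorbing-boundary points from each side of every dimension
    # and return a plain NumPy array.
    field = np.asarray(field)
    return field[tuple(slice(nbl, -nbl) for _ in field.shape)]
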
Example #8
description = "Test for the fixed-precision mode of zfp"
parser = ArgumentParser(description=description)
parser.add_argument("filename", type=str, help="Filename")
p_parser = parser.add_mutually_exclusive_group(required=False)
p_parser.add_argument('--parallel', dest='parallel', action='store_true')
p_parser.add_argument('--no-parallel', dest='parallel', action='store_false')
parser.set_defaults(parallel=True)
args = parser.parse_args()

filename = args.filename
parallel = args.parallel

with h5py.File(filename, 'r') as f:
    uncompressed = f['data'][()].astype(np.dtype('float32'))
print("\"Size of compressed field\", \"Compression Factor\", \"Compression time\", \"Decompression time\", \"Precision\", \"Error norm\", \"Maximum error\"")
for p_i in range(6, 20):
    precision = p_i
    with Timer(factor=1000) as t:
        compressed = compress(uncompressed, precision=precision, parallel=parallel)
    
    with Timer(factor=1000) as t2:
        decompressed = decompress(compressed, uncompressed.shape, uncompressed.dtype, precision=precision, parallel=parallel)

    to_hdf5(decompressed, "decompressed-p-%d.h5"%p_i)
    error_matrix = decompressed-uncompressed
    print("%f, %f, %f, %f, %f, %f, %f" % (len(compressed), len(uncompressed.tostring())/float(len(compressed)), t.elapsed, t2.elapsed, precision, np.linalg.norm(error_matrix), np.max(error_matrix)))
Example #9
def verify(space_order=4,
           kernel='OT4',
           nbpml=40,
           filename='',
           compression_params=None,
           **kwargs):
    # Use None rather than a mutable dict as the default for compression_params.
    if compression_params is None:
        compression_params = {}
    solver = acoustic_setup(shape=(10, 10),
                            spacing=(10, 10),
                            nbpml=10,
                            tn=50,
                            space_order=space_order,
                            kernel=kernel,
                            **kwargs)
    #solver = overthrust_setup(filename=filename, tn=50, nbpml=nbpml, space_order=space_order, kernel=kernel, **kwargs)

    u = TimeFunction(name='u',
                     grid=solver.model.grid,
                     time_order=2,
                     space_order=solver.space_order)

    rec = Receiver(name='rec',
                   grid=solver.model.grid,
                   time_range=solver.geometry.time_axis,
                   coordinates=solver.geometry.rec_positions)
    cp = DevitoCheckpoint([u])
    n_checkpoints = None
    m = solver.model.m
    dt = solver.dt
    v = TimeFunction(name='v',
                     grid=solver.model.grid,
                     time_order=2,
                     space_order=solver.space_order)
    grad = Function(name='grad', grid=solver.model.grid)
    wrap_fw = CheckpointOperator(solver.op_fwd(save=False),
                                 src=solver.geometry.src,
                                 u=u,
                                 m=m,
                                 rec=rec,
                                 dt=dt)
    wrap_rev = CheckpointOperator(solver.op_grad(save=False),
                                  u=u,
                                  v=v,
                                  m=m,
                                  rec=rec,
                                  dt=dt,
                                  grad=grad)
    nt = rec.data.shape[0] - 2  # timesteps for the checkpointed loop
    print("Verifying for %d timesteps" % nt)
    wrp = Revolver(cp,
                   wrap_fw,
                   wrap_rev,
                   n_checkpoints,
                   nt,
                   compression_params=compression_params)
    wrp.apply_forward()
    summary = wrp.apply_reverse()
    print(wrp.profiler.timings)

    with Timer([]) as tf:
        rec2, u2, _ = solver.forward(save=True)

    with Timer([]) as tr:
        grad2, _ = solver.gradient(rec=rec2, u=u2)

    error = grad.data - grad2.data
    to_hdf5(error, 'zfp_grad_errors.h5')
    print("Error norm", np.linalg.norm(error))

    #assert(np.allclose(grad.data, grad2.data))
    print("Checkpointing implementation is numerically verified")
    print("Verification took %d ms for forward and %d ms for reverse" %
          (tf.elapsed, tr.elapsed))